This commit is contained in:
Altair 2024-01-14 17:58:13 +08:00
parent 23254b6b17
commit bc338801b4
4 changed files with 689 additions and 0 deletions

291
snippet/ebpf/memleak.bpf.c Normal file
View File

@ -0,0 +1,291 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "memleak.h"
#define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
#define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK)
/* Requested size of an in-flight allocation, keyed by tgid; written at the
 * allocator's entry probe and consumed at its return probe. */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, pid_t); // tgid of the thread currently inside an alloc call
__type(value, u64); // requested allocation size
__uint(max_entries, 10240);
} sizes SEC(".maps");
/* One entry per outstanding (not yet freed) allocation. */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u64); /* alloc return address */
__type(value, struct alloc_info);
__uint(max_entries, ALLOCS_MAX_ENTRIES);
} allocs SEC(".maps");
/* Aggregated totals of outstanding allocations, grouped by call stack. */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u64); /* stack id */
__type(value, union combined_alloc_info);
__uint(max_entries, COMBINED_ALLOCS_MAX_ENTRIES);
} combined_allocs SEC(".maps");
/* Saves posix_memalign's user-space `memptr` argument between the entry and
 * return probes, so the return probe can read the resulting pointer. */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u64); // tgid
__type(value, u64); // address of the user-space memptr variable
__uint(max_entries, 10240);
} memptrs SEC(".maps");
/* Stack-trace storage. The value size (stack depth per id) and max_entries
 * (number of distinct stack ids) are configured from user space after
 * memleak_bpf__open() and before load, so they are omitted here.
 */
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__type(key, u32); /* stack id */
//__type(value, xxx); set dynamically after memleak_bpf__open
//__uint(max_entries, xxx); set dynamically after memleak_bpf__open
} stack_traces SEC(".maps");
char LICENSE[] SEC("license") = "Dual BSD/GPL";
/* 通用的内存分配 uprobe的处理逻辑
* (malloc, calloc等)
* size: , malloc
*/
/* Common uprobe logic for allocation entry points (malloc, calloc, ...).
 * Records the requested size keyed by tgid so the matching return probe
 * can pair it with the pointer the allocator hands back.
 * size: number of bytes requested by the caller
 */
static int gen_alloc_enter(size_t size)
{
	const pid_t tgid = bpf_get_current_pid_tgid() >> 32;

	bpf_map_update_elem(&sizes, &tgid, &size, BPF_ANY);
	return 0;
}
/* 通用的内存分配 uretprobe的处理逻辑
* (malloc, calloc等)
* ctx: struct pt_regs , BPF_KRETPROBE
* address: , malloc
*/
/* Common uretprobe logic for allocators (malloc, calloc, ...).
 * ctx: pt_regs forwarded from BPF_KRETPROBE
 * address: pointer returned by the allocator; 0 means the allocation failed
 */
static int gen_alloc_exit2(void *ctx, u64 address)
{
const u64 addr = (u64)address;
const pid_t pid = bpf_get_current_pid_tgid() >> 32;
struct alloc_info info;
/* Size recorded by the matching *_enter probe; nothing to do without it. */
const u64 * size = bpf_map_lookup_elem(&sizes, &pid);
if (NULL == size) {
return 0;
}
__builtin_memset(&info, 0, sizeof(info));
info.size = *size;
bpf_map_delete_elem(&sizes, &pid);
if (0 != address) {
info.stack_id = bpf_get_stackid(ctx, &stack_traces, USER_STACKID_FLAGS);
bpf_map_update_elem(&allocs, &addr, &info, BPF_ANY);
union combined_alloc_info add_cinfo = {
.total_size = info.size,
.number_of_allocs = 1
};
/* NOTE(review): combined_allocs keys are u64 but &info.stack_id points at
 * an int; the extra 4 bytes read come from struct padding that the memset
 * above zeroed. Works, but fragile -- a u64 stack id would be safer. */
union combined_alloc_info * exist_cinfo = bpf_map_lookup_elem(&combined_allocs, &info.stack_id);
if (NULL == exist_cinfo) {
/* BPF_NOEXIST: if another CPU inserted this key first, drop our update
 * instead of overwriting its counters. */
bpf_map_update_elem(&combined_allocs, &info.stack_id, &add_cinfo, BPF_NOEXIST);
}
else {
/* Both counters are packed into .bits, so one atomic add updates
 * total_size and number_of_allocs together. */
__sync_fetch_and_add(&exist_cinfo->bits, add_cinfo.bits);
}
}
// bpf_printk("malloc_exit address=%p\n", address);
return 0;
}
/* 把 gen_alloc_exit2 接口中的2个参数精简为1个参数
* BPF_KRETPROBE
*/
/* Single-argument convenience wrapper around gen_alloc_exit2: extracts the
 * allocator's return value from the pt_regs supplied by BPF_KRETPROBE. */
static int gen_alloc_exit(struct pt_regs *ctx)
{
return gen_alloc_exit2(ctx, PT_REGS_RC(ctx));
}
/* 通用的内存释放 uprobe的处理逻辑
* (free, munmap等)
* address: , free
*/
/* Common uprobe logic for deallocation entry points (free, munmap, ...).
 * address: pointer being released by the caller; unknown pointers (never
 * recorded, or recorded before tracing started) are silently ignored.
 */
static int gen_free_enter(const void *address)
{
const u64 addr = (u64)address;
const struct alloc_info * info = bpf_map_lookup_elem(&allocs, &addr);
if (NULL == info) {
return 0;
}
/* NOTE(review): same int-key-vs-u64 lookup as in gen_alloc_exit2; the
 * padding after stack_id was zeroed when the element was stored. */
union combined_alloc_info * exist_cinfo = bpf_map_lookup_elem(&combined_allocs, &info->stack_id);
if (NULL == exist_cinfo) {
return 0;
}
const union combined_alloc_info sub_cinfo = {
.total_size = info->size,
.number_of_allocs = 1
};
/* One atomic subtract updates both packed counters. */
__sync_fetch_and_sub(&exist_cinfo->bits, sub_cinfo.bits);
bpf_map_delete_elem(&allocs, &addr);
// bpf_printk("free_enter address=%p\n", address);
return 0;
}
/////////////////////////////////////////////////////////////////////
/* malloc(size) entry: record the requested size. */
SEC("uprobe")
int BPF_KPROBE(malloc_enter, size_t size)
{
return gen_alloc_enter(size);
}
/* malloc return: pair the recorded size with the returned pointer. */
SEC("uretprobe")
int BPF_KRETPROBE(malloc_exit)
{
return gen_alloc_exit(ctx);
}
/* free(address) entry: retire the matching outstanding allocation. */
SEC("uprobe")
int BPF_KPROBE(free_enter, void * address)
{
return gen_free_enter(address);
}
/* posix_memalign(memptr, alignment, size) returns an error code rather than
 * the pointer, so the entry probe must also remember *where* the result will
 * be written: the user-space memptr variable. */
SEC("uprobe")
int BPF_KPROBE(posix_memalign_enter, void **memptr, size_t alignment, size_t size)
{
const u64 memptr64 = (u64)(size_t)memptr;
const u64 pid = bpf_get_current_pid_tgid() >> 32;
bpf_map_update_elem(&memptrs, &pid, &memptr64, BPF_ANY);
return gen_alloc_enter(size);
}
SEC("uretprobe")
int BPF_KRETPROBE(posix_memalign_exit)
{
const u64 pid = bpf_get_current_pid_tgid() >> 32;
u64 *memptr64;
void *addr;
memptr64 = bpf_map_lookup_elem(&memptrs, &pid);
if (!memptr64)
return 0;
bpf_map_delete_elem(&memptrs, &pid);
/* Read the freshly allocated pointer out of the caller's memptr variable
 * saved by the entry probe. */
if (bpf_probe_read_user(&addr, sizeof(void*), (void*)(size_t)*memptr64))
return 0;
const u64 addr64 = (u64)(size_t)addr;
return gen_alloc_exit2(ctx, addr64);
}
/* calloc(nmemb, size): the total request is the product.
 * NOTE(review): nmemb * size can wrap for pathological arguments, in which
 * case a truncated size would be recorded -- acceptable for tracing. */
SEC("uprobe")
int BPF_KPROBE(calloc_enter, size_t nmemb, size_t size)
{
return gen_alloc_enter(nmemb * size);
}
SEC("uretprobe")
int BPF_KRETPROBE(calloc_exit)
{
return gen_alloc_exit(ctx);
}
/* realloc(ptr, size) releases (or reuses) ptr and produces size bytes, so
 * model it as a free of ptr followed by a fresh allocation. */
SEC("uprobe")
int BPF_KPROBE(realloc_enter, void *ptr, size_t size)
{
gen_free_enter(ptr);
return gen_alloc_enter(size);
}
SEC("uretprobe")
int BPF_KRETPROBE(realloc_exit)
{
return gen_alloc_exit(ctx);
}
/* mmap/munmap are tracked like malloc/free, keyed by the mapped address. */
SEC("uprobe")
int BPF_KPROBE(mmap_enter, void *address, size_t size)
{
return gen_alloc_enter(size);
}
SEC("uretprobe")
int BPF_KRETPROBE(mmap_exit)
{
return gen_alloc_exit(ctx);
}
SEC("uprobe")
int BPF_KPROBE(munmap_enter, void *address)
{
return gen_free_enter(address);
}
/* Aligned-allocation family: only the size matters for leak accounting; the
 * alignment argument is ignored. Matching releases go through free_enter. */
SEC("uprobe")
int BPF_KPROBE(aligned_alloc_enter, size_t alignment, size_t size)
{
return gen_alloc_enter(size);
}
SEC("uretprobe")
int BPF_KRETPROBE(aligned_alloc_exit)
{
return gen_alloc_exit(ctx);
}
SEC("uprobe")
int BPF_KPROBE(valloc_enter, size_t size)
{
return gen_alloc_enter(size);
}
SEC("uretprobe")
int BPF_KRETPROBE(valloc_exit)
{
return gen_alloc_exit(ctx);
}
SEC("uprobe")
int BPF_KPROBE(memalign_enter, size_t alignment, size_t size)
{
return gen_alloc_enter(size);
}
SEC("uretprobe")
int BPF_KRETPROBE(memalign_exit)
{
return gen_alloc_exit(ctx);
}
SEC("uprobe")
int BPF_KPROBE(pvalloc_enter, size_t size)
{
return gen_alloc_enter(size);
}
SEC("uretprobe")
int BPF_KRETPROBE(pvalloc_exit)
{
return gen_alloc_exit(ctx);
}

306
snippet/ebpf/memleak.c Normal file
View File

@ -0,0 +1,306 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2020 Facebook */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>
#include <bpf/libbpf.h>
#include "memleak.skel.h"
#include "memleak.h"
#include <assert.h>
#include "blazesym.h"
static const int perf_max_stack_depth = 127; /* stack depth stored per stack id */
static const int stack_map_max_entries = 10240; /* max distinct stack ids, each mapping to a full stack */
static __u64 * g_stacks = NULL; /* scratch buffer for one stack trace read from the kernel */
static size_t g_stacks_size = 0; /* byte size of g_stacks */
static const char * p_print_file = "/tmp/memleak_print"; /* touch this file to dump a report */
static const char * p_quit_file = "/tmp/memleak_quit"; /* touch this file to exit */
static int attach_pid; /* target process id (from argv[1]) */
static char binary_path[128] = {0}; /* ELF whose symbols the uprobes attach to */
/* Attach a u[ret]probe for symbol sym_name (in binary_path, for attach_pid)
 * to skeleton program prog_name; the resulting link is stored in
 * skel->links.prog_name (NULL on failure). */
#define __ATTACH_UPROBE(skel, sym_name, prog_name, is_retprobe) \
do { \
LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts, \
.func_name = #sym_name, \
.retprobe = is_retprobe); \
skel->links.prog_name = bpf_program__attach_uprobe_opts( \
skel->progs.prog_name, \
attach_pid, \
binary_path, \
0, \
&uprobe_opts); \
} while (false)
/* Return -errno from the enclosing function if the preceding attach failed. */
#define __CHECK_PROGRAM(skel, prog_name) \
do { \
if (!skel->links.prog_name) { \
perror("no program attached for " #prog_name); \
return -errno; \
} \
} while (false)
#define __ATTACH_UPROBE_CHECKED(skel, sym_name, prog_name, is_retprobe) \
do { \
__ATTACH_UPROBE(skel, sym_name, prog_name, is_retprobe); \
__CHECK_PROGRAM(skel, prog_name); \
} while (false)
/* The *_CHECKED variants fail hard when the symbol cannot be attached (e.g.
 * malloc must exist in libc.so); the unchecked variants silently tolerate a
 * missing symbol (deprecated or optional allocator entry points).
 */
#define ATTACH_UPROBE(skel, sym_name, prog_name) __ATTACH_UPROBE(skel, sym_name, prog_name, false)
#define ATTACH_URETPROBE(skel, sym_name, prog_name) __ATTACH_UPROBE(skel, sym_name, prog_name, true)
#define ATTACH_UPROBE_CHECKED(skel, sym_name, prog_name) __ATTACH_UPROBE_CHECKED(skel, sym_name, prog_name, false)
#define ATTACH_URETPROBE_CHECKED(skel, sym_name, prog_name) __ATTACH_UPROBE_CHECKED(skel, sym_name, prog_name, true)
/* libbpf logging callback: forward all messages to stderr regardless of level. */
static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
{
return vfprintf(stderr, format, args);
}
static struct blaze_symbolizer *symbolizer; /* blazesym handle, created in main */
/* Symbolize and print one stack trace, one line per frame.
 * stack: array of instruction-pointer values
 * stack_sz: number of valid entries in stack
 * pid: process whose address space resolves the symbols; 0 selects kernel
 *      symbolization instead
 */
static void show_stack_trace(__u64 *stack, int stack_sz, pid_t pid)
{
const struct blaze_result *result;
const struct blaze_sym *sym;
int i, j;
assert(sizeof(uintptr_t) == sizeof(uint64_t));
if (pid) {
struct blaze_symbolize_src_process src = {
.pid = pid,
};
result = blaze_symbolize_process(symbolizer, &src, (const uintptr_t *)stack, stack_sz);
} else {
struct blaze_symbolize_src_kernel src = {};
result = blaze_symbolize_kernel(symbolizer, &src, (const uintptr_t *)stack, stack_sz);
}
for (i = 0; i < stack_sz; i++) {
/* Frame without symbol info: print the raw address only. */
if (!result || result->size <= i || !result->entries[i].size) {
printf(" %2d [<%016llx>]\n", i, stack[i]);
continue;
}
/* Exactly one symbol for this address: print it inline, with as much
 * source location (dir/file:line) as is available. */
if (result->entries[i].size == 1) {
sym = &result->entries[i].syms[0];
if (sym->dir && sym->dir[0] != '\0' && sym->file && sym->file[0] != '\0') {
printf(" %2d [<%016llx>] %s+0x%lx %s/%s:%u\n", i, stack[i],
sym->name, sym->offset, sym->dir, sym->file, sym->line);
} else if (sym->file && sym->file[0] != '\0') {
printf(" %2d [<%016llx>] %s+0x%lx %s:%u\n", i, stack[i],
sym->name, sym->offset, sym->file, sym->line);
} else {
printf(" %2d [<%016llx>] %s+0x%lx\n", i, stack[i], sym->name, sym->offset);
}
continue;
}
/* Multiple symbols for one address (e.g. inlined frames): print the
 * address once, then each symbol indented below it. */
printf(" %2d [<%016llx>]\n", i, stack[i]);
for (j = 0; j < result->entries[i].size; j++) {
sym = &result->entries[i].syms[j];
if (sym->dir && sym->dir[0] != '\0' && sym->file && sym->file[0] != '\0') {
printf(" %s+0x%lx %s/%s:%u\n", sym->name,
sym->offset, sym->dir, sym->file, sym->line);
} else if (sym->file && sym->file[0] != '\0') {
printf(" %s+0x%lx %s:%u\n", sym->name,
sym->offset, sym->file, sym->line);
} else {
printf(" %s+0x%lx\n", sym->name, sym->offset);
}
}
}
blaze_result_free(result);
}
/* Walk the combined_allocs map and, for every call stack with outstanding
 * allocations, print the aggregated totals followed by the symbolized stack.
 * skel: loaded memleak skeleton
 * pid: process used for user-space symbolization
 * Returns 0 on success or -errno on a map-access failure.
 */
int print_outstanding_combined_allocs(struct memleak_bpf * skel, pid_t pid)
{
const size_t combined_allocs_key_size = bpf_map__key_size(skel->maps.combined_allocs);
const size_t stack_traces_key_size = bpf_map__key_size(skel->maps.stack_traces);
/* Standard libbpf iteration idiom: get_next_key until ENOENT. */
for (__u64 prev_key = 0, curr_key = 0; ; prev_key = curr_key) {
if (bpf_map__get_next_key(skel->maps.combined_allocs,
&prev_key, &curr_key, combined_allocs_key_size)) {
if (errno == ENOENT) {
break; //no more keys, done!
}
perror("map get next key failed!");
return -errno;
}
// stack_id = curr_key
union combined_alloc_info cinfo;
memset(&cinfo, 0, sizeof(cinfo));
if (bpf_map__lookup_elem(skel->maps.combined_allocs,
&curr_key, combined_allocs_key_size, &cinfo, sizeof(cinfo), 0)) {
if (errno == ENOENT) {
/* Element deleted concurrently by the BPF side; skip it. */
continue;
}
perror("map lookup failed!");
return -errno;
}
/* Fetch the raw stack for this stack id into the g_stacks scratch buffer. */
if (bpf_map__lookup_elem(skel->maps.stack_traces,
&curr_key, stack_traces_key_size, g_stacks, g_stacks_size, 0)) {
perror("failed to lookup stack traces!");
return -errno;
}
printf("stack_id=0x%llx with outstanding allocations: total_size=%llu nr_allocs=%llu\n",
curr_key, (__u64)cinfo.total_size, (__u64)cinfo.number_of_allocs);
/* Count valid frames: the first zero entry marks the end of the stack
 * (presumably the kernel zero-fills the tail of the value -- TODO confirm). */
int stack_sz = 0;
for (int i = 0; i < perf_max_stack_depth; i++) {
if (0 == g_stacks[i]) {
break;
}
// printf("[%3d] 0x%llx\n", i, g_stacks[i]);
stack_sz++;
}
show_stack_trace(g_stacks, stack_sz, pid);
}
return 0;
}
int attach_uprobes(struct memleak_bpf *skel)
{
ATTACH_UPROBE_CHECKED(skel, malloc, malloc_enter);
ATTACH_URETPROBE_CHECKED(skel, malloc, malloc_exit);
ATTACH_UPROBE_CHECKED(skel, free, free_enter);
ATTACH_UPROBE_CHECKED(skel, posix_memalign, posix_memalign_enter);
ATTACH_URETPROBE_CHECKED(skel, posix_memalign, posix_memalign_exit);
ATTACH_UPROBE_CHECKED(skel, calloc, calloc_enter);
ATTACH_URETPROBE_CHECKED(skel, calloc, calloc_exit);
ATTACH_UPROBE_CHECKED(skel, realloc, realloc_enter);
ATTACH_URETPROBE_CHECKED(skel, realloc, realloc_exit);
ATTACH_UPROBE_CHECKED(skel, mmap, mmap_enter);
ATTACH_URETPROBE_CHECKED(skel, mmap, mmap_exit);
ATTACH_UPROBE_CHECKED(skel, memalign, memalign_enter);
ATTACH_URETPROBE_CHECKED(skel, memalign, memalign_exit);
ATTACH_UPROBE_CHECKED(skel, free, free_enter);
ATTACH_UPROBE_CHECKED(skel, munmap, munmap_enter);
// the following probes are intentinally allowed to fail attachment
// deprecated in libc.so bionic
ATTACH_UPROBE(skel, valloc, valloc_enter);
ATTACH_URETPROBE(skel, valloc, valloc_exit);
// deprecated in libc.so bionic
ATTACH_UPROBE(skel, pvalloc, pvalloc_enter);
ATTACH_URETPROBE(skel, pvalloc, pvalloc_exit);
// added in C11
ATTACH_UPROBE(skel, aligned_alloc, aligned_alloc_enter);
ATTACH_URETPROBE(skel, aligned_alloc, aligned_alloc_exit);
return 0;
}
/* Entry point: load and attach the memleak BPF skeleton to the process given
 * in argv[1], then poll for the control files until told to quit.
 * Creating /tmp/memleak_print dumps a leak report; /tmp/memleak_quit exits.
 */
int main(int argc, char **argv)
{
	struct memleak_bpf *skel;
	int err, i;

	if (2 != argc)
	{
		printf("usage:%s attach_pid\n", argv[0]);
		return -1;
	}
	attach_pid = atoi(argv[1]);
	strcpy(binary_path, "/lib/x86_64-linux-gnu/libc.so.6");
	/* Set up libbpf errors and debug info callback */
	libbpf_set_print(libbpf_print_fn);
	/* Load and verify BPF application */
	skel = memleak_bpf__open();
	if (!skel) {
		fprintf(stderr, "Failed to open BPF skeleton\n");
		return 1;
	}
	/* STACK_TRACE map geometry can only be set between open and load. */
	bpf_map__set_value_size(skel->maps.stack_traces, perf_max_stack_depth * sizeof(__u64));
	bpf_map__set_max_entries(skel->maps.stack_traces, stack_map_max_entries);
	err = memleak_bpf__load(skel);
	if (err) {
		fprintf(stderr, "Failed to load BPF skeleton\n");
		goto cleanup;
	}
	err = attach_uprobes(skel);
	if (err) {
		fprintf(stderr, "failed to attach uprobes\n");
		goto cleanup;
	}
	err = memleak_bpf__attach(skel);
	if (err) {
		fprintf(stderr, "Failed to auto-attach BPF skeleton: %d\n", err);
		goto cleanup;
	}
	/* Scratch buffer for one stack trace read back from the kernel.
	 * Fix: the original passed an unchecked malloc result straight to
	 * memset, crashing on allocation failure. */
	g_stacks_size = perf_max_stack_depth * sizeof(*g_stacks);
	g_stacks = (__u64 *)malloc(g_stacks_size);
	if (!g_stacks) {
		fprintf(stderr, "Failed to allocate stack buffer\n");
		err = -1;
		goto cleanup;
	}
	memset(g_stacks, 0, g_stacks_size);
	symbolizer = blaze_symbolizer_new();
	if (!symbolizer) {
		fprintf(stderr, "Fail to create a symbolizer\n");
		err = -1;
		goto cleanup;
	}
	/* Poll the control files: quit file wins, print file dumps a report. */
	for (i = 0;; i++) {
		if (0 == access(p_quit_file, F_OK)) {
			remove(p_quit_file);
			break;
		}
		else if (0 == access(p_print_file, F_OK)) {
			remove(p_print_file);
			print_outstanding_combined_allocs(skel, attach_pid);
		}
		usleep(100000);
	}
cleanup:
	memleak_bpf__destroy(skel);
	blaze_symbolizer_free(symbolizer);
	free(g_stacks);
	return -err;
}

25
snippet/ebpf/memleak.h Normal file
View File

@ -0,0 +1,25 @@
#ifndef __MEMLEAK_H
#define __MEMLEAK_H
#define ALLOCS_MAX_ENTRIES 1000000
#define COMBINED_ALLOCS_MAX_ENTRIES 10240
/* One outstanding allocation: its requested size plus the id of the call
 * stack that performed it. */
struct alloc_info {
__u64 size;
int stack_id;
};
/* Per-stack aggregate, packed into one 64-bit word so the BPF side can
 * update size and count with a single atomic add/sub on `bits`.
 * Layout: total_size in the low 40 bits, number_of_allocs in the high 24.
 * Adding/subtracting two such unions via `bits` updates both fields at once;
 * this stays correct as long as each field remains within its bit range
 * (no carry crosses the 40-bit boundary).
 */
union combined_alloc_info {
struct {
__u64 total_size : 40;
__u64 number_of_allocs : 24;
};
__u64 bits;
};
#endif /* __MEMLEAK_H */

View File

@ -0,0 +1,67 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/* Three interchangeable leaf allocators for exercising different probes;
 * exactly one is compiled in via the #if switches below. */
#if 0
/* Variant 1: plain malloc. */
static void * alloc_v3(int alloc_size)
{
void * ptr = malloc(alloc_size);
return ptr;
}
#endif
#if 0
/* Variant 2: posix_memalign.
 * NOTE(review): ignores alloc_size and always requests 1024 bytes. */
static void * alloc_v3(int alloc_size)
{
void * memptr = NULL;
posix_memalign(&memptr, 128, 1024);
return memptr;
}
#endif
#if 1
/* Active variant: C++ operator new[]. */
static void * alloc_v3(int alloc_size)
{
void * ptr = new char[alloc_size];
return ptr;
}
#endif
/* Middle stack frame: exists only so the recorded call stack has depth. */
static void * alloc_v2(int alloc_size)
{
	return alloc_v3(alloc_size);
}
/* Outermost helper frame: adds another level to the recorded call stack. */
static void * alloc_v1(int alloc_size)
{
	return alloc_v2(alloc_size);
}
/* Leak driver for the memleak tracer: allocates 4 bytes every 2 seconds and
 * frees only every other allocation, so roughly half of the allocations are
 * deliberately leaked. Runs forever (stop it externally). */
int main(int argc, char * argv[])
{
const int alloc_size = 4;
void * ptr = NULL;
int i = 0;
for (i = 0; ; i++)
{
ptr = alloc_v1(alloc_size);
sleep(2);
/* Free only on even iterations; odd iterations leak on purpose. */
if (0 == i % 2)
{
// free(ptr);
delete [] (char *)ptr;
}
}
return 0;
}