arm64: Track no early_pgtable_alloc() for kmemleak
ANBZ: #835 commit c6975d7cab
upstream. After switching the page size from 64KB to 4KB on several arm64 servers here, kmemleak starts to run out of early memory pool due to a huge number of those early_pgtable_alloc() calls: kmemleak_alloc_phys() memblock_alloc_range_nid() memblock_phys_alloc_range() early_pgtable_alloc() init_pmd() alloc_init_pud() __create_pgd_mapping() __map_memblock() paging_init() setup_arch() start_kernel() Increasing the default value of DEBUG_KMEMLEAK_MEM_POOL_SIZE by 4 times won't be enough for a server with 200GB+ memory. There isn't much interest in checking memory leaks for those early page tables, and those early memory mappings should not reference other memory. Hence, there are no kmemleak false positives, and we can safely skip tracking those early allocations from kmemleak like we did in commit fed84c7852
("mm/memblock.c: skip kmemleak for kasan_init()") without needing to introduce complications to automatically scale the value depending on the runtime memory size etc. After the patch, the default value of DEBUG_KMEMLEAK_MEM_POOL_SIZE becomes sufficient again. [chg: fix arch/arm/mm/kasan_init.c conflict] Signed-off-by: Qian Cai <quic_qiancai@quicinc.com> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Reviewed-by: Mike Rapoport <rppt@linux.ibm.com> Link: https://lore.kernel.org/r/20211105150509.7826-1-quic_qiancai@quicinc.com Signed-off-by: Will Deacon <will@kernel.org> Signed-off-by: Guanghui Feng <guanghuifeng@linux.alibaba.com> Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
This commit is contained in:
parent
6f39158191
commit
07d87bc549
|
@ -34,7 +34,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
|
|||
{
|
||||
void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
|
||||
__pa(MAX_DMA_ADDRESS),
|
||||
MEMBLOCK_ALLOC_KASAN, node);
|
||||
MEMBLOCK_ALLOC_NOLEAKTRACE, node);
|
||||
if (!p)
|
||||
panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
|
||||
__func__, PAGE_SIZE, PAGE_SIZE, node,
|
||||
|
@ -47,7 +47,8 @@ static phys_addr_t __init kasan_alloc_raw_page(int node)
|
|||
{
|
||||
void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
|
||||
__pa(MAX_DMA_ADDRESS),
|
||||
MEMBLOCK_ALLOC_KASAN, node);
|
||||
MEMBLOCK_ALLOC_NOLEAKTRACE,
|
||||
node);
|
||||
if (!p)
|
||||
panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
|
||||
__func__, PAGE_SIZE, PAGE_SIZE, node,
|
||||
|
|
|
@ -96,7 +96,8 @@ static phys_addr_t __init early_pgtable_alloc(int shift)
|
|||
phys_addr_t phys;
|
||||
void *ptr;
|
||||
|
||||
phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
|
||||
phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
|
||||
MEMBLOCK_ALLOC_NOLEAKTRACE);
|
||||
if (!phys)
|
||||
panic("Failed to allocate page table page\n");
|
||||
|
||||
|
|
|
@ -371,7 +371,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
|
|||
/* Flags for memblock allocation APIs */
|
||||
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
|
||||
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
|
||||
#define MEMBLOCK_ALLOC_KASAN 1
|
||||
#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
|
||||
|
||||
/* We are using top down, so it is safe to use 0 here */
|
||||
#define MEMBLOCK_LOW_LIMIT 0
|
||||
|
|
|
@ -287,7 +287,7 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
|
|||
{
|
||||
/* pump up @end */
|
||||
if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
|
||||
end == MEMBLOCK_ALLOC_KASAN)
|
||||
end == MEMBLOCK_ALLOC_NOLEAKTRACE)
|
||||
end = memblock.current_limit;
|
||||
|
||||
/* avoid allocating the first page */
|
||||
|
@ -1361,8 +1361,11 @@ again:
|
|||
return 0;
|
||||
|
||||
done:
|
||||
/* Skip kmemleak for kasan_init() due to high volume. */
|
||||
if (end != MEMBLOCK_ALLOC_KASAN)
|
||||
/*
|
||||
* Skip kmemleak for those places like kasan_init() and
|
||||
* early_pgtable_alloc() due to high volume.
|
||||
*/
|
||||
if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
|
||||
/*
|
||||
* The min_count is set to 0 so that memblock allocated
|
||||
* blocks are never reported as leaks. This is because many
|
||||
|
|
Loading…
Reference in New Issue