anolis: x86/csv: Manage CSV guest's private memory by CMA

ANBZ: #773

The private memory of a CSV guest is isolated from the VMM and has to
be physically contiguous. CMA (Contiguous Memory Allocator) is the
kernel's allocator for physically contiguous memory.

Use CMA to manage the CSV guest's private memory.
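
For reference, the underlying CMA calls this patch builds on work as
below (a minimal sketch, not part of this patch; my_cma stands for a
hypothetical area created earlier with cma_declare_contiguous_nid()):

	struct page *page;

	/* 512 pages = 2 MiB, aligned to 2^9 pages; no_warn = false */
	page = cma_alloc(my_cma, 512, 9, false);
	if (page) {
		/* ... use the physically contiguous range ... */
		cma_release(my_cma, page, 512);
	}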

Signed-off-by: Xin Jiang <jiangxin@hygon.cn>
Reviewed-by: Artie Ding <artie.ding@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/2178

arch/x86/Kconfig

@@ -2064,6 +2064,21 @@ config EFI_MIXED
If unsure, say N.
config HYGON_CSV
bool "Hygon secure virtualization CSV support"
default n
depends on CPU_SUP_HYGON && AMD_MEM_ENCRYPT && CMA
help
The CSV feature integrates the secure processor, memory encryption
and memory isolation to protect a guest's private data. A CSV
guest's context, such as its CPU registers, control block and
nested page table, is accessible only to the guest itself and the
secure processor; neither other guests nor the host can tamper
with it.
Say Y to enable support for Hygon secure virtualization on Hygon
processors.
source "kernel/Kconfig.hz"
config KEXEC
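
With this option selected, the relevant fragment of a kernel .config
would look like the following (a sketch; the last three lines are the
dependencies stated above):

	CONFIG_HYGON_CSV=y
	CONFIG_CPU_SUP_HYGON=y
	CONFIG_AMD_MEM_ENCRYPT=y
	CONFIG_CMA=y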

arch/x86/include/asm/csv.h (new file)

@@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Hygon China Secure Virtualization (CSV)
*
* Copyright (C) Hygon Info Technologies Ltd.
*
* Author: Jiang Xin <jiangxin@hygon.cn>
*/
#ifndef _ASM_X86_CSV_H
#define _ASM_X86_CSV_H
#ifdef CONFIG_HYGON_CSV
#define CSV_MR_ALIGN_BITS (28)
struct csv_mem {
uint64_t start;
uint64_t size;
};
extern struct csv_mem *csv_smr;
extern unsigned int csv_smr_num;
void __init early_csv_reserve_mem(void);
phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed,
unsigned int align);
void csv_release_to_contiguous(phys_addr_t pa, size_t size);
uint32_t csv_get_smr_entry_shift(void);
#endif
#endif
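
A consumer of this header (for example the CSV firmware/KVM side,
which is not part of this patch) would use the interface roughly as
follows; the 2 MiB size and the node mask are illustrative
assumptions:

	#include <asm/csv.h>
	#include <linux/nodemask.h>

	nodemask_t nodes = node_states[N_ONLINE];
	size_t size = 2UL << 20;	/* 2 MiB */
	phys_addr_t pa;

	/* align is an order in pages: get_order(2 MiB) == 9 */
	pa = csv_alloc_from_contiguous(size, &nodes, get_order(size));
	if (pa)
		csv_release_to_contiguous(pa, size);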

arch/x86/kernel/setup.c

@@ -50,6 +50,9 @@
#include <asm/unwind.h>
#include <asm/vsyscall.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_HYGON_CSV
#include <asm/csv.h>
#endif
/*
* max_low_pfn_mapped: highest directly mapped pfn < 4 GB
@@ -1218,6 +1221,9 @@ void __init setup_arch(char **cmdline_p)
early_acpi_boot_init();
initmem_init();
#ifdef CONFIG_HYGON_CSV
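	/* Carve out CSV CMA areas before dma_contiguous_reserve() below. */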
early_csv_reserve_mem();
#endif
dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
if (boot_cpu_has(X86_FEATURE_GBPAGES))

arch/x86/mm/Makefile

@@ -60,3 +60,5 @@ obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o
obj-$(CONFIG_HYGON_CSV) += csv.o

arch/x86/mm/csv.c (new file, 367 lines)

@@ -0,0 +1,367 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hygon China Secure Virtualization (CSV)
*
* Copyright (C) Hygon Info Technologies Ltd.
*
* Author: Jiang Xin <jiangxin@hygon.cn>
*/
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/psp-sev.h>
#include <linux/cma.h>
#include <linux/minmax.h>
#include <linux/hugetlb.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/csv.h>
#undef pr_fmt
#define pr_fmt(fmt) "CSV-CMA: " fmt
#define NUM_SMR_ENTRIES (8 * 1024)
#define CSV_CMA_SHIFT PUD_SHIFT
#define CSV_CMA_SIZE (1 << CSV_CMA_SHIFT)
#define MIN_SMR_ENTRY_SHIFT 23
#define CSV_SMR_INFO_SIZE (nr_node_ids * sizeof(struct csv_mem))
struct used_hugetlb_migration_control {
spinlock_t lock;
int enabled_counts;
int last_value;
};
static struct used_hugetlb_migration_control control;
/* 0 percent of total memory by default */
static unsigned char csv_mem_percentage;
static unsigned long csv_mem_size;
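/*
 * Boot-time tuning, e.g. "csv_mem_size=4G" or "csv_mem_percentage=50".
 * csv_mem_size takes precedence: early_csv_reserve_mem() converts it to
 * a percentage of total memory. Either way the result is capped at 80%.
 */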
static int __init cmdline_parse_csv_mem_size(char *str)
{
unsigned long size;
char *endp;
if (str) {
size = memparse(str, &endp);
csv_mem_size = size;
if (!csv_mem_size)
csv_mem_percentage = 0;
}
return 0;
}
early_param("csv_mem_size", cmdline_parse_csv_mem_size);
static int __init cmdline_parse_csv_mem_percentage(char *str)
{
unsigned char percentage;
int ret;
if (!str)
return 0;
ret = kstrtou8(str, 10, &percentage);
if (!ret) {
csv_mem_percentage = min_t(unsigned char, percentage, 80);
if (csv_mem_percentage != percentage)
pr_warn("csv_mem_percentage is limited to 80.\n");
} else {
/* Disable CSV CMA. */
csv_mem_percentage = 0;
pr_err("csv_mem_percentage is invalid. (0 - 80) is expected.\n");
}
return ret;
}
early_param("csv_mem_percentage", cmdline_parse_csv_mem_percentage);
struct csv_mem *csv_smr;
EXPORT_SYMBOL_GPL(csv_smr);
unsigned int csv_smr_num;
EXPORT_SYMBOL_GPL(csv_smr_num);
struct csv_cma {
int fast;
struct cma *cma;
};
struct cma_array {
unsigned long count;
struct csv_cma csv_cma[];
};
static unsigned int smr_entry_shift;
static struct cma_array *csv_contiguous_pernuma_area[MAX_NUMNODES];
static void csv_set_smr_entry_shift(unsigned int shift)
{
smr_entry_shift = max_t(unsigned int, shift, MIN_SMR_ENTRY_SHIFT);
pr_info("SMR entry size is 0x%x\n", 1 << smr_entry_shift);
}
unsigned int csv_get_smr_entry_shift(void)
{
return smr_entry_shift;
}
EXPORT_SYMBOL_GPL(csv_get_smr_entry_shift);
static unsigned long __init present_pages_in_node(int nid)
{
unsigned long range_start_pfn, range_end_pfn;
unsigned long nr_present = 0;
int i;
for_each_mem_pfn_range(i, nid, &range_start_pfn, &range_end_pfn, NULL)
nr_present += range_end_pfn - range_start_pfn;
return nr_present;
}
static phys_addr_t __init csv_early_percent_memory_on_node(int nid)
{
return (present_pages_in_node(nid) * csv_mem_percentage / 100) << PAGE_SHIFT;
}
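/*
 * Reserve csv_mem_percentage of each online node's memory as a series of
 * CSV_CMA_SIZE (1 << PUD_SHIFT, i.e. 1 GiB on x86_64) CMA areas, and
 * record the physical range spanned by each node's areas in csv_smr[].
 */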
void __init csv_cma_reserve_mem(void)
{
int node, i;
unsigned long size;
int idx = 0;
int count;
int cma_array_size;
unsigned long max_spanned_size = 0;
csv_smr = memblock_alloc_node(CSV_SMR_INFO_SIZE, SMP_CACHE_BYTES, NUMA_NO_NODE);
if (!csv_smr) {
pr_err("Fail to allocate csv_smr\n");
return;
}
for_each_node_state(node, N_ONLINE) {
int ret;
char name[CMA_MAX_NAME];
struct cma_array *array;
unsigned long spanned_size;
unsigned long start = 0, end = 0;
struct csv_cma *csv_cma;
size = csv_early_percent_memory_on_node(node);
count = DIV_ROUND_UP(size, 1 << CSV_CMA_SHIFT);
if (!count)
continue;
cma_array_size = count * sizeof(*csv_cma) + sizeof(*array);
array = memblock_alloc_node(cma_array_size, SMP_CACHE_BYTES, NUMA_NO_NODE);
if (!array) {
pr_err("Fail to allocate cma_array\n");
continue;
}
array->count = 0;
csv_contiguous_pernuma_area[node] = array;
for (i = 0; i < count; i++) {
csv_cma = &array->csv_cma[i];
csv_cma->fast = 1;
snprintf(name, sizeof(name), "csv-n%dc%d", node, i);
ret = cma_declare_contiguous_nid(0, CSV_CMA_SIZE, 0,
1 << CSV_MR_ALIGN_BITS, PMD_SHIFT - PAGE_SHIFT,
false, name, &(csv_cma->cma), node);
if (ret) {
pr_warn("Fail to reserve memory size 0x%x node %d\n",
1 << CSV_CMA_SHIFT, node);
break;
}
if (start > cma_get_base(csv_cma->cma) || !start)
start = cma_get_base(csv_cma->cma);
if (end < cma_get_base(csv_cma->cma) + cma_get_size(csv_cma->cma))
end = cma_get_base(csv_cma->cma) + cma_get_size(csv_cma->cma);
}
if (!i)
continue;
array->count = i;
spanned_size = end - start;
if (spanned_size > max_spanned_size)
max_spanned_size = spanned_size;
csv_smr[idx].start = start;
csv_smr[idx].size = end - start;
idx++;
pr_info("Node %d - reserve size 0x%016lx, (expected size 0x%016lx)\n",
node, (unsigned long)i * CSV_CMA_SIZE, size);
}
csv_smr_num = idx;
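	/*
	 * Choose an SMR entry size that divides the largest node span into
	 * NUM_SMR_ENTRIES entries, rounded up to a power of two: e.g. a
	 * 64 GiB span gives 64 GiB / 8192 = 8 MiB entries (shift 23).
	 */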
WARN_ON((max_spanned_size / NUM_SMR_ENTRIES) < 1);
if (likely((max_spanned_size / NUM_SMR_ENTRIES) >= 1))
csv_set_smr_entry_shift(ilog2(max_spanned_size / NUM_SMR_ENTRIES - 1) + 1);
}
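/* Upper bound on the number of CMA areas CSV may register across all nodes. */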
#define CSV_CMA_AREAS 2458
void __init early_csv_reserve_mem(void)
{
unsigned long total_pages;
if (!(boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
boot_cpu_data.x86_model >= 0x4))
return;
if (cma_alloc_areas(CSV_CMA_AREAS))
return;
total_pages = PHYS_PFN(memblock_phys_mem_size());
if (csv_mem_size) {
if (csv_mem_size < (total_pages << PAGE_SHIFT)) {
csv_mem_percentage = csv_mem_size * 100 / (total_pages << PAGE_SHIFT);
if (csv_mem_percentage > 80)
csv_mem_percentage = 80; /* Maximum percentage */
} else
csv_mem_percentage = 80; /* Maximum percentage */
}
if (!csv_mem_percentage) {
pr_warn("Don't reserve any memory\n");
return;
}
csv_cma_reserve_mem();
}
static void enable_used_hugtlb_migration(void)
{
spin_lock(&control.lock);
if (!control.enabled_counts) {
control.last_value = sysctl_enable_used_hugtlb_migration;
sysctl_enable_used_hugtlb_migration = 1;
}
control.enabled_counts++;
spin_unlock(&control.lock);
}
static void disable_used_hugtlb_migration(void)
{
spin_lock(&control.lock);
control.enabled_counts--;
if (!control.enabled_counts)
sysctl_enable_used_hugtlb_migration = control.last_value;
spin_unlock(&control.lock);
}
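/*
 * Allocate physically contiguous secure memory, preferring the current
 * NUMA node. The per-node areas are scanned in two passes: the first pass
 * only tries areas whose previous allocation succeeded (fast == 1), the
 * second retries every area before giving up.
 */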
phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed,
unsigned int align)
{
int nid;
int nr_nodes;
struct page *page = NULL;
phys_addr_t phys_addr;
int count;
struct csv_cma *csv_cma;
int fast = 1;
if (!nodes_allowed || size > CSV_CMA_SIZE) {
pr_err("Invalid params, size = 0x%lx, nodes_allowed = %p\n",
size, nodes_allowed);
return 0;
}
align = min_t(unsigned int, align, get_order(CSV_CMA_SIZE));
retry:
nr_nodes = nodes_weight(*nodes_allowed);
/* Traverse from current node */
nid = numa_node_id();
if (!node_isset(nid, *nodes_allowed))
nid = next_node_in(nid, *nodes_allowed);
for (; nr_nodes > 0; nid = next_node_in(nid, *nodes_allowed), nr_nodes--) {
struct cma_array *array = csv_contiguous_pernuma_area[nid];
if (!array)
continue;
count = array->count;
while (count) {
csv_cma = &array->csv_cma[count - 1];
/*
 * The check of csv_cma->fast is lockless, but that's fine: functional
 * correctness does not depend on the value of csv_cma->fast.
 */
if (fast && !csv_cma->fast) {
count--;
continue;
}
enable_used_hugtlb_migration();
page = cma_alloc(csv_cma->cma, PAGE_ALIGN(size) >> PAGE_SHIFT,
align, true);
disable_used_hugtlb_migration();
if (page) {
page->private = (unsigned long)csv_cma;
if (!csv_cma->fast)
csv_cma->fast = 1;
goto success;
} else
csv_cma->fast = 0;
count--;
}
}
if (fast) {
fast = 0;
goto retry;
} else {
pr_err("Fail to alloc secure memory(size = 0x%lx)\n", size);
return 0;
}
success:
phys_addr = page_to_phys(page);
clflush_cache_range(__va(phys_addr), size);
return phys_addr;
}
EXPORT_SYMBOL_GPL(csv_alloc_from_contiguous);
void csv_release_to_contiguous(phys_addr_t pa, size_t size)
{
struct csv_cma *csv_cma;
struct page *page = pfn_to_page(pa >> PAGE_SHIFT);
WARN_ON(!page);
if (likely(page)) {
csv_cma = (struct csv_cma *)page->private;
WARN_ON(!csv_cma);
if (likely(csv_cma)) {
page->private = 0;
csv_cma->fast = 1;
cma_release(csv_cma->cma, page, PAGE_ALIGN(size) >> PAGE_SHIFT);
}
}
}
EXPORT_SYMBOL_GPL(csv_release_to_contiguous);
static int __init csv_mm_init(void)
{
spin_lock_init(&control.lock);
control.enabled_counts = 0;
return 0;
}
static void __exit csv_mm_exit(void)
{
}
module_init(csv_mm_init);
module_exit(csv_mm_exit);

include/linux/cma.h

@@ -49,4 +49,5 @@ extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
extern int __init cma_alloc_areas(unsigned int max_cma_size);
#endif

mm/cma.c

@@ -35,7 +35,10 @@
#include "cma.h"
-struct cma cma_areas[MAX_CMA_AREAS];
+static struct cma cma_areas_data[MAX_CMA_AREAS];
+static unsigned int cma_areas_size = MAX_CMA_AREAS;
+struct cma *cma_areas = cma_areas_data;
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);
@@ -152,6 +155,25 @@ static int __init cma_init_reserved_areas(void)
}
core_initcall(cma_init_reserved_areas);
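/*
 * Switch cma_areas from the static table to one big enough for
 * @max_cma_size areas. Must be called before any CMA area is
 * registered; once areas exist the table can no longer be grown.
 */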
int __init cma_alloc_areas(unsigned int max_cma_size)
{
struct cma *data;
if (max_cma_size <= MAX_CMA_AREAS)
return 0;
if (cma_area_count || cma_areas != cma_areas_data)
return -EPERM;
data = memblock_alloc(max_cma_size * sizeof(*cma_areas), SMP_CACHE_BYTES);
if (!data)
return -ENOMEM;
cma_areas = data;
cma_areas_size = max_cma_size;
return 0;
}
/**
* cma_init_reserved_mem() - create custom contiguous area from reserved memory
* @base: Base address of the reserved area
@@ -173,7 +195,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
phys_addr_t alignment;
/* Sanity checks */
-if (cma_area_count == ARRAY_SIZE(cma_areas)) {
+if (cma_area_count == cma_areas_size) {
pr_err("Not enough slots for CMA reserved regions!\n");
return -ENOSPC;
}
@@ -253,7 +275,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
__func__, &size, &base, &limit, &alignment);
-if (cma_area_count == ARRAY_SIZE(cma_areas)) {
+if (cma_area_count == cma_areas_size) {
pr_err("Not enough slots for CMA reserved regions!\n");
return -ENOSPC;
}

mm/cma.h

@@ -18,7 +18,7 @@ struct cma {
char name[CMA_MAX_NAME];
};
-extern struct cma cma_areas[MAX_CMA_AREAS];
+extern struct cma *cma_areas;
extern unsigned cma_area_count;
static inline unsigned long cma_bitmap_maxno(struct cma *cma)