kvm: add cvm host feature

ANBZ: #11194

commit ae80c7e2f5cd3e50d1f360c387a33088310d54a0 openeuler

*********************************************************
Collectively squash all patches of the cvm feature for the
CI entrance guard. This includes 30 patches from openEuler;
the last patch was added to fix compilation errors caused
by those former 30 patches. The original record of each
patch is as follows:
*********************************************************
virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X

--------------------------------

Add host support for Confidential VMs (a hedged userspace sketch
follows the list):
1. Add a new kvm_type for cvm.
2. Initialize cvm-related data when userspace creates a VM with the cvm type.
3. Add the cvm hypervisor, named tmm, which runs in S-EL2.
4. KVM calls the tmm interface to create the cvm stage-2 page table and to run the cvm.
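
The sketch below is for orientation only and is not part of the patch:
it shows how userspace might create a cvm-type VM and drive the TMM
capability through the standard KVM ioctls. KVM_VM_TYPE_CVM and
KVM_CAP_ARM_TMM are placeholders standing in for the constants defined
in the patched uapi headers.

/*
 * Hedged illustration (not from the patch): create a cvm-type VM and
 * enable the TMM capability on the VM fd. The two macros below are
 * placeholders; the real values come from the patched uapi headers.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef KVM_VM_TYPE_CVM
#define KVM_VM_TYPE_CVM		(1UL << 8)	/* placeholder machine-type flag */
#endif
#ifndef KVM_CAP_ARM_TMM
#define KVM_CAP_ARM_TMM		300		/* placeholder capability number */
#endif

int main(void)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_ARM_TMM,
		/* args[0] would select a TMM sub-command such as CREATE_RD, see kvm_tmi.h */
	};
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	int vm;

	if (kvm < 0)
		return 1;

	/* kvm_arch_init_vm() sets up kvm->arch.cvm for a cvm-type VM. */
	vm = ioctl(kvm, KVM_CREATE_VM, KVM_VM_TYPE_CVM);
	if (vm < 0)
		return 1;

	if (ioctl(vm, KVM_ENABLE_CAP, &cap))
		perror("KVM_ENABLE_CAP(KVM_CAP_ARM_TMM)");

	return 0;
}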

Signed-off-by: Jingxian He <hejingxian@huawei.com>
Signed-off-by: Min Li <gumi@linux.alibaba.com>

pmu: enable pmu phys irq inject for cvm
commit 4ea45d2fd512642870b39bbcf269e519a6cd02f3 openeuler
kvm: add support for multi-numa and fix kabi change
commit f9e08ad0b3f11416c9f69219a83c1816e5565c37 openeuler
swiotlb: Add restricted DMA alloc/free support
commit 13c1076273aacace25ad0f21e29793f577e106a4 openeuler
swiotlb: add swiotlb io_tlb_list release slots method
commit 876f29b7dcb14e7d691629ed6a0e17f057d8ca43 openeuler
mm: enable swiotlb alloc for cvm share mem
commit 51ad7abc32796cb2489e1eeccb7d349a4ea5d8c2 openeuler
config: modify arm64 defconfig for cvm feature
commit 49c1f9f2bb76398db3bd4755373df47bcedb5add openeuler
gicv3: add lpi support for cvm guest
commit b53706136c0e86d7873b4854f965930bee8e2567 openeuler
cvm: enable secure memory alloc on multiple numa nodes
commit b82426dd122648291aca77c9b68d45eb7a3b467b openeuler
cvm: add secure memory query method
commit e619ef917255002d7d3d3b94ec71d0b631154e3c openeuler
cvm: improve security for cvm host feature
commit 5dfcf9d3eb22d9a40d3f1753ef7398bec3fd2f14 openeuler
cvm: clean code for cvm init
commit a787bb8a1cc90bc79b9ffe12dc36deeff93629d6 openeuler
cvm: enhance security for cvm host feature
commit 8b7b7e707808193e420eafc49aa4bde0def1c53a openeuler
cvm: delete dead code and resolve macro definition holes
commit 949fdbfafae331ce9543e5e805774896f5f67177 openeuler
IMA: Support the measurement extending of TSI TMM
commit b1410546d39f1e5531f2318ee010d1d767561794 openeuler
cvm_tsi: add cvm tsi interface
commit 31071f4b2a0b2c18ad45d9d2c621e1366bf331e7 openeuler
IMA: Check cvm world before call smc function
commit c880881e5a59128f8fe6517839f3ed039dcc683a openeuler
cvm_tsi: Fix security issue for Confidential cVM TSI
commit a62183c9eaf4ec976ad238924c5fa7fae582cb1a openeuler
ima: Fix violation digests extending issue in cvm
commit 9e51de29d10f683d69ecc7dcc3474c4566548563 openeuler
Fix token error issue when concurrent calls
commit b7a88d85242bdf7331e33b45333f8dfb72fcabf5 openeuler
arm64: Expand ESR_ELx_WFx_ISS_TI to match its ARMv8.7 definition
commit af2000e6d6313108c1eccdb3148d4a1c2f025875 openeuler
arm64: Add RV and RN fields for ESR_ELx_WFx_ISS
commit 308b6b0f74ea76c17fdd287ab06b3cb9afaef9e9 openeuler
KVM: arm64: Simplify kvm_cpu_has_pending_timer()
commit f880a467846df82f330dfc3227a0aa136dd69b9e openeuler
KVM: arm64: Introduce kvm_counter_compute_delta() helper
commit 2173e07423136caa98ba07d973e0ead24650a13e openeuler
KVM: arm64: Handle blocking WFIT instruction
commit 3c52cea7f59d9d2edeaf416d274ba2b28c914d8a openeuler
KVM: arm64: Offer early resume for non-blocking WFxT instructions
commit 3eaf467a6ac29f6be4d972872cac28cd72a3aeb0 openeuler
KVM: arm64: Expose the WFXT feature to guests
commit da0fcf7a6ed265f7143c5b149855a30f2c4f9033 openeuler
arm64: Add HWCAP advertising FEAT_WFXT
commit c00c6ce6414313e84be698ede8fdaf48abd9aaa5 openeuler
arm64: Add wfet()/wfit() helpers
commit 9a0b3b688a0fe8dc79568dc1be040c7bb9e801d4 openeuler
arm64: Use WFxT for __delay() when possible
commit 62291a4e1f2731dd95db0e1989684eeb12df3546 openeuler
arm64: solve compilation errors due to introducing the cvm feature of kunpeng920b

Signed-off-by: Min Li <gumi@linux.alibaba.com>
Reviewed-by: Guanghui Feng <guanghuifeng@linux.alibaba.com>
Reviewed-by: Jay Chen <jkchen@linux.alibaba.com>
Reviewed-by: Guixin Liu <kanie@linux.alibaba.com>
Reviewed-by: Xunlei Pang <xlpang@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/4258


@ -292,6 +292,8 @@ infrastructure:
+------------------------------+---------+---------+
| RPRES | [7-4] | y |
+------------------------------+---------+---------+
| WFXT | [3-0] | y |
+------------------------------+---------+---------+
Appendix I: Example


@ -257,6 +257,10 @@ HWCAP2_RPRES
Functionality implied by ID_AA64ISAR2_EL1.RPRES == 0b0001.
HWCAP2_WFXT
Functionality implied by ID_AA64ISAR2_EL1.WFXT == 0b0010.
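
As an illustration only (not part of this documentation change),
userspace could probe the new capability via the auxiliary vector; the
fallback define mirrors the HWCAP2_WFXT value added to the uapi header
later in this series.

/* Hedged sketch: detect FEAT_WFXT (wfet/wfit) from userspace via AT_HWCAP2. */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_WFXT
#define HWCAP2_WFXT	(1UL << 23)	/* mirrors the uapi definition added in this series */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("FEAT_WFXT: %s\n", (hwcap2 & HWCAP2_WFXT) ? "supported" : "not supported");
	return 0;
}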
4. Unused AT_HWCAP bits
-----------------------


@ -0,0 +1 @@
CONFIG_CVM_GUEST=y


@ -0,0 +1 @@
CONFIG_CVM_HOST=y


@ -0,0 +1 @@
CONFIG_DMA_RESTRICTED_POOL=y


@ -2073,6 +2073,14 @@ config ARM64_CPU_EXTENDED_CTRL
This option provides the IMP_CPUECTLR_EL1 configuration interface
under /sys/devices/system/cpu/cpuX/.
config CVM_GUEST
bool "Enable cvm guest run"
depends on DMA_RESTRICTED_POOL
help
Support CVM guest based on S-EL2
If unsure, say N.
menu "Power management options"
source "kernel/power/Kconfig"


@ -16,7 +16,11 @@
#define sev() asm volatile("sev" : : : "memory")
#define wfe() asm volatile("wfe" : : : "memory")
#define wfet(val) asm volatile("msr s0_3_c1_c0_0, %0" \
: : "r" (val) : "memory")
#define wfi() asm volatile("wfi" : : : "memory")
#define wfit(val) asm volatile("msr s0_3_c1_c0_1, %0" \
: : "r" (val) : "memory")
#define isb() asm volatile("isb" : : : "memory")
#define dmb(opt) asm volatile("dmb " #opt : : : "memory")


@ -70,6 +70,7 @@
#define ARM64_WORKAROUND_HISILICON_1980005 62
#define ARM64_HAS_TWED 63
#define ARM64_HAS_ECV 64
#define ARM64_NCAPS 65
#define ARM64_HAS_WFXT 65
#define ARM64_NCAPS 66
#endif /* __ASM_CPUCAPS_H */


@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
*/
#ifndef __CVM_GUEST_H
#define __CVM_GUEST_H
#ifdef CONFIG_CVM_GUEST
struct device;
extern int set_cvm_memory_encrypted(unsigned long addr, int numpages);
extern int set_cvm_memory_decrypted(unsigned long addr, int numpages);
extern bool is_cvm_world(void);
#define is_swiotlb_for_alloc is_swiotlb_for_alloc
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
/* Force dma alloc by swiotlb in Confidential VMs */
return is_cvm_world();
}
extern void __init swiotlb_cvm_update_mem_attributes(void);
extern void cvm_tsi_init(void);
#else
static inline int set_cvm_memory_encrypted(unsigned long addr, int numpages)
{
return 0;
}
static inline int set_cvm_memory_decrypted(unsigned long addr, int numpages)
{
return 0;
}
static inline bool is_cvm_world(void)
{
return false;
}
static inline void __init swiotlb_cvm_update_mem_attributes(void) {}
static inline void cvm_tsi_init(void) {}
#endif /* CONFIG_CVM_GUEST */
#endif /* __CVM_GUEST_H */


@ -0,0 +1,168 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_CVM_SMC_H_
#define __ASM_CVM_SMC_H_
#ifdef CONFIG_CVM_GUEST
#include <linux/arm-smccc.h>
#include <asm/cvm_tsi.h>
#include <linux/slab.h>
#define SMC_TSI_CALL_BASE 0xC4000000
#define TSI_ABI_VERSION_MAJOR 1
#define TSI_ABI_VERSION_MINOR 0
#define TSI_ABI_VERSION ((TSI_ABI_VERSION_MAJOR << 16) | TSI_ABI_VERSION_MINOR)
#define TSI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16)
#define TSI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF)
#define TSI_SUCCESS 0
#define TSI_ERROR_INPUT 1
#define TSI_ERROR_STATE 2
#define TSI_INCOMPLETE 3
#define SMC_TSI_FID(_x) (SMC_TSI_CALL_BASE + (_x))
#define SMC_TSI_ABI_VERSION SMC_TSI_FID(0x190)
/*
* arg1: Index, which measurements slot to read
* arg2: Measurement value
* ret0: Status / error
*/
#define SMC_TSI_MEASUREMENT_READ SMC_TSI_FID(0x192)
/*
* arg1: Index, which measurements slot to extend
* arg2: Size of realm measurement in bytes, max 64 bytes
* arg3: Measurement value
* ret0: Status / error
*/
#define SMC_TSI_MEASUREMENT_EXTEND SMC_TSI_FID(0x193)
/*
* arg1: Challenge value
* ret0: Status / error
* ret1: Upper bound on attestation token size in bytes
*/
#define SMC_TSI_ATTESTATION_TOKEN_INIT SMC_TSI_FID(0x194)
/*
* arg1: IPA of the Granule to which the token will be written
* arg2: Offset within Granule to start of buffer in bytes
* arg3: Size of buffer in bytes
* ret0: Status / error
* ret1: Number of bytes written to buffer
*/
#define SMC_TSI_ATTESTATION_TOKEN_CONTINUE SMC_TSI_FID(0x195)
/*
* arg1: struct cVM config addr
* ret0: Status / error
*/
#define SMC_TSI_CVM_CONFIG SMC_TSI_FID(0x196)
/*
* arg1: Device cert buffer
* arg2: Size of buffer in bytes
* ret0: Status / error
*/
#define SMC_TSI_DEVICE_CERT SMC_TSI_FID(0x19A)
static inline unsigned long tsi_get_version(void)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(SMC_TSI_ABI_VERSION, &res);
return res.a0;
}
static inline unsigned long tsi_get_cvm_config(struct cvm_config *cfg)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(SMC_TSI_CVM_CONFIG, &res);
cfg->ipa_bits = res.a1;
cfg->algorithm = res.a2;
return res.a0;
}
static inline unsigned long tsi_measurement_extend(struct cvm_measurement_extend *cvm_meas_ext)
{
struct arm_smccc_res res;
unsigned char *value;
value = kmalloc(MAX_MEASUREMENT_SIZE, GFP_KERNEL);
if (!value)
return -ENOMEM;
memcpy(value, cvm_meas_ext->value, MAX_MEASUREMENT_SIZE);
arm_smccc_1_1_smc(SMC_TSI_MEASUREMENT_EXTEND, cvm_meas_ext->index,
cvm_meas_ext->size, virt_to_phys(value), &res);
kfree(value);
return res.a0;
}
static inline unsigned long tsi_measurement_read(struct cvm_measurement *cvm_meas)
{
struct arm_smccc_res res;
unsigned char *value;
value = kmalloc(MAX_MEASUREMENT_SIZE, GFP_KERNEL);
if (!value)
return -ENOMEM;
arm_smccc_1_1_smc(SMC_TSI_MEASUREMENT_READ, cvm_meas->index,
virt_to_phys(value), &res);
memcpy(cvm_meas->value, value, MAX_MEASUREMENT_SIZE);
kfree(value);
return res.a0;
}
static inline unsigned long tsi_attestation_token_init(unsigned char *challenge)
{
struct arm_smccc_res res;
unsigned char *buf;
buf = kmalloc(CHALLENGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, challenge, CHALLENGE_SIZE);
arm_smccc_1_1_smc(SMC_TSI_ATTESTATION_TOKEN_INIT, virt_to_phys(buf), &res);
kfree(buf);
return res.a0;
}
static inline unsigned long tsi_attestation_token_continue(struct cvm_token_granule *token_granule)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(SMC_TSI_ATTESTATION_TOKEN_CONTINUE, virt_to_phys(token_granule->ipa),
token_granule->offset, token_granule->size, &res);
token_granule->num_wr_bytes = res.a1;
return res.a0;
}
static inline unsigned long tsi_get_device_cert(unsigned char *device_cert,
unsigned long *device_cert_size)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(SMC_TSI_DEVICE_CERT, virt_to_phys(device_cert), *device_cert_size, &res);
*device_cert_size = res.a1;
return res.a0;
}
#endif /* CONFIG_CVM_GUEST */
#endif /* __ASM_CVM_SMC_H_ */


@ -132,7 +132,10 @@
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
#define ESR_ELx_WFx_ISS_TI (UL(1) << 0)
#define ESR_ELx_WFx_ISS_RN (UL(0x1F) << 5)
#define ESR_ELx_WFx_ISS_RV (UL(1) << 2)
#define ESR_ELx_WFx_ISS_TI (UL(3) << 0)
#define ESR_ELx_WFx_ISS_WFxT (UL(2) << 0)
#define ESR_ELx_WFx_ISS_WFI (UL(0) << 0)
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
@ -145,7 +148,8 @@
#define DISR_EL1_ESR_MASK (ESR_ELx_AET | ESR_ELx_EA | ESR_ELx_FSC)
/* ESR value templates for specific events */
#define ESR_ELx_WFx_MASK (ESR_ELx_EC_MASK | ESR_ELx_WFx_ISS_TI)
#define ESR_ELx_WFx_MASK (ESR_ELx_EC_MASK | \
(ESR_ELx_WFx_ISS_TI & ~ESR_ELx_WFx_ISS_WFxT))
#define ESR_ELx_WFx_WFI_VAL ((ESR_ELx_EC_WFx << ESR_ELx_EC_SHIFT) | \
ESR_ELx_WFx_ISS_WFI)


@ -108,6 +108,7 @@
#define KERNEL_HWCAP_ECV __khwcap2_feature(ECV)
#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP)
#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES)
#define KERNEL_HWCAP_WFXT __khwcap2_feature(WFXT)
/*
* This yields a mask that user programs can use to figure out what


@ -537,4 +537,24 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
return test_bit(feature, vcpu->arch.features);
}
#ifdef CONFIG_CVM_HOST
static inline bool kvm_is_cvm(struct kvm *kvm)
{
if (static_branch_unlikely(&kvm_cvm_is_available)) {
struct cvm *cvm = kvm->arch.cvm;
return cvm && cvm->is_cvm;
}
return false;
}
static inline enum cvm_state kvm_cvm_state(struct kvm *kvm)
{
struct cvm *cvm = kvm->arch.cvm;
if (!cvm)
return 0;
return READ_ONCE(cvm->state);
}
#endif
#endif /* __ARM64_KVM_EMULATE_H__ */


@ -26,6 +26,9 @@
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmm.h>
#endif
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@ -123,7 +126,20 @@ struct kvm_arch {
#ifdef CONFIG_KVM_HISI_VIRT
spinlock_t dvm_lock;
cpumask_t *dvm_cpumask; /* Union of all vcpu's cpus_ptr */
#endif
#if defined(CONFIG_KVM_HISI_VIRT) || defined(CONFIG_CVM_HOST)
#ifndef __GENKSYMS__
union {
cpumask_t *dvm_cpumask; /* Union of all vcpu's cpus_ptr */
void *cvm;
};
#else
cpumask_t *dvm_cpumask; /* Union of all vcpu's cpus_ptr */
#endif
#endif
#ifdef CONFIG_KVM_HISI_VIRT
u64 lsudvmbm_el2;
#endif
};
@ -399,8 +415,18 @@ struct kvm_vcpu_arch {
#ifdef CONFIG_KVM_HISI_VIRT
/* Copy of current->cpus_ptr */
cpumask_t *cpus_ptr;
#endif
#if defined(CONFIG_KVM_HISI_VIRT) || defined(CONFIG_CVM_HOST)
#ifndef __GENKSYMS__
union {
cpumask_t *pre_cpus_ptr;
void *tec;
};
#else
cpumask_t *pre_cpus_ptr;
#endif
#endif
};
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@ -430,6 +456,7 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_WFIT (1 << 16) /* WFIT instruction trapped */
#define vcpu_has_sve(vcpu) (system_supports_sve() && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))


@ -0,0 +1,352 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2024, The Linux Foundation. All rights reserved.
*/
#ifndef __TMM_TMI_H
#define __TMM_TMI_H
#ifdef CONFIG_CVM_HOST
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_pgtable.h>
#include <linux/virtio_ring.h>
#include <asm/sysreg.h>
#define GRANULE_SIZE 4096
#define NO_NUMA 0 /* numa bitmap */
#define TMM_TTT_LEVEL_2 2
#define TMM_TTT_LEVEL_3 3
/* TMI error codes. */
#define TMI_SUCCESS 0
#define TMI_ERROR_INPUT 1
#define TMI_ERROR_MEMORY 2
#define TMI_ERROR_ALIAS 3
#define TMI_ERROR_IN_USE 4
#define TMI_ERROR_CVM_STATE 5
#define TMI_ERROR_OWNER 6
#define TMI_ERROR_TEC 7
#define TMI_ERROR_TTT_WALK 8
#define TMI_ERROR_TTT_ENTRY 9
#define TMI_ERROR_NOT_SUPPORTED 10
#define TMI_ERROR_INTERNAL 11
#define TMI_ERROR_CVM_POWEROFF 12
#define TMI_ERROR_TTT_CREATED 13
#define TMI_RETURN_STATUS(ret) ((ret) & 0xFF)
#define TMI_RETURN_INDEX(ret) (((ret) >> 8) & 0xFF)
#define TMI_FEATURE_REGISTER_0_S2SZ GENMASK(7, 0)
#define TMI_FEATURE_REGISTER_0_LPA2 BIT(8)
#define TMI_FEATURE_REGISTER_0_SVE_EN BIT(9)
#define TMI_FEATURE_REGISTER_0_SVE_VL GENMASK(13, 10)
#define TMI_FEATURE_REGISTER_0_NUM_BPS GENMASK(17, 14)
#define TMI_FEATURE_REGISTER_0_NUM_WPS GENMASK(21, 18)
#define TMI_FEATURE_REGISTER_0_PMU_EN BIT(22)
#define TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS GENMASK(27, 23)
#define TMI_FEATURE_REGISTER_0_HASH_SHA_256 BIT(28)
#define TMI_FEATURE_REGISTER_0_HASH_SHA_512 BIT(29)
#define TMI_CVM_PARAM_FLAG_LPA2 BIT(0)
#define TMI_CVM_PARAM_FLAG_SVE BIT(1)
#define TMI_CVM_PARAM_FLAG_PMU BIT(2)
/*
* Many of these fields are smaller than u64 but all fields have u64
* alignment, so use u64 to ensure correct alignment.
*/
struct tmi_cvm_params {
u64 flags;
u64 s2sz;
u64 sve_vl;
u64 num_bps;
u64 num_wps;
u64 pmu_num_cnts;
u64 measurement_algo;
u64 vmid;
u64 ns_vtcr;
u64 vttbr_el2;
u64 ttt_base;
s64 ttt_level_start;
u64 ttt_num_start;
u8 rpv[64]; /* Bits 512 */
};
#define TMI_NOT_RUNNABLE 0
#define TMI_RUNNABLE 1
/*
* The number of GPRs (starting from X0) that are
* configured by the host when a TEC is created.
*/
#define TEC_CREATE_NR_GPRS (8U)
struct tmi_tec_params {
uint64_t gprs[TEC_CREATE_NR_GPRS];
uint64_t pc;
uint64_t flags;
uint64_t ram_size;
};
#define TEC_ENTRY_FLAG_EMUL_MMIO (1UL << 0U)
#define TEC_ENTRY_FLAG_INJECT_SEA (1UL << 1U)
#define TEC_ENTRY_FLAG_TRAP_WFI (1UL << 2U)
#define TEC_ENTRY_FLAG_TRAP_WFE (1UL << 3U)
#define TMI_EXIT_SYNC 0
#define TMI_EXIT_IRQ 1
#define TMI_EXIT_FIQ 2
#define TMI_EXIT_PSCI 3
#define TMI_EXIT_HOST_CALL 5
#define TMI_EXIT_SERROR 6
/*
* The number of GPRs (starting from X0) per voluntary exit context.
* Per SMCCC.
*/
#define TEC_EXIT_NR_GPRS (31U)
/* Maximum number of Interrupt Controller List Registers. */
#define TEC_GIC_NUM_LRS (16U)
struct tmi_tec_entry {
uint64_t flags;
uint64_t gprs[TEC_EXIT_NR_GPRS];
uint64_t gicv3_lrs[TEC_GIC_NUM_LRS];
uint64_t gicv3_hcr;
};
struct tmi_tec_exit {
uint64_t exit_reason;
uint64_t esr;
uint64_t far;
uint64_t hpfar;
uint64_t gprs[TEC_EXIT_NR_GPRS];
uint64_t gicv3_hcr;
uint64_t gicv3_lrs[TEC_GIC_NUM_LRS];
uint64_t gicv3_misr;
uint64_t gicv3_vmcr;
uint64_t cntv_ctl;
uint64_t cntv_cval;
uint64_t cntp_ctl;
uint64_t cntp_cval;
uint64_t imm;
uint64_t pmu_ovf_status;
};
struct tmi_tec_run {
struct tmi_tec_entry tec_entry;
struct tmi_tec_exit tec_exit;
};
#define TMI_FNUM_MIN_VALUE U(0x150)
#define TMI_FNUM_MAX_VALUE U(0x18F)
/******************************************************************************
* Bit definitions inside the function id as per the SMC calling convention
******************************************************************************/
#define FUNCID_TYPE_SHIFT 31
#define FUNCID_CC_SHIFT 30
#define FUNCID_OEN_SHIFT 24
#define FUNCID_NUM_SHIFT 0
#define FUNCID_TYPE_MASK 0x1
#define FUNCID_CC_MASK 0x1
#define FUNCID_OEN_MASK 0x3f
#define FUNCID_NUM_MASK 0xffff
#define FUNCID_TYPE_WIDTH 1
#define FUNCID_CC_WIDTH 1
#define FUNCID_OEN_WIDTH 6
#define FUNCID_NUM_WIDTH 16
#define SMC_64 1
#define SMC_32 0
#define SMC_TYPE_FAST 1
#define SMC_TYPE_STD 0
/*****************************************************************************
* Owning entity number definitions inside the function id as per the SMC
* calling convention
*****************************************************************************/
#define OEN_ARM_START 0
#define OEN_ARM_END 0
#define OEN_CPU_START 1
#define OEN_CPU_END 1
#define OEN_SIP_START 2
#define OEN_SIP_END 2
#define OEN_OEM_START 3
#define OEN_OEM_END 3
#define OEN_STD_START 4 /* Standard Calls */
#define OEN_STD_END 4
#define OEN_TAP_START 48 /* Trusted Applications */
#define OEN_TAP_END 49
#define OEN_TOS_START 50 /* Trusted OS */
#define OEN_TOS_END 63
#define OEN_LIMIT 64
/* Get TMI fastcall std FID from function number */
#define TMI_FID(smc_cc, func_num) \
((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \
((smc_cc) << FUNCID_CC_SHIFT) | \
(OEN_STD_START << FUNCID_OEN_SHIFT) | \
((func_num) << FUNCID_NUM_SHIFT))
#define U(_x) (_x##U)
#define TMI_NO_MEASURE_CONTENT U(0)
#define TMI_MEASURE_CONTENT U(1)
#define CVM_IPA_MAX_VAL (1UL << 48)
/*
* SMC_TMM_INIT_COMPLETE is the only function in the TMI that originates from
* the CVM world and is handled by the SPMD. The remaining functions are
* always invoked by the Normal world, forward by SPMD and handled by the
* TMM.
*/
#define TMI_FNUM_VERSION_REQ U(0x260)
#define TMI_FNUM_MEM_INFO_SHOW U(0x261)
#define TMI_FNUM_DATA_CREATE U(0x262)
#define TMI_FNUM_DATA_DESTROY U(0x263)
#define TMI_FNUM_CVM_ACTIVATE U(0x264)
#define TMI_FNUM_CVM_CREATE U(0x265)
#define TMI_FNUM_CVM_DESTROY U(0x266)
#define TMI_FNUM_TEC_CREATE U(0x267)
#define TMI_FNUM_TEC_DESTROY U(0x268)
#define TMI_FNUM_TEC_ENTER U(0x269)
#define TMI_FNUM_TTT_CREATE U(0x26A)
#define TMI_FNUM_PSCI_COMPLETE U(0x26B)
#define TMI_FNUM_FEATURES U(0x26C)
#define TMI_FNUM_TTT_MAP_RANGE U(0x26D)
#define TMI_FNUM_TTT_UNMAP_RANGE U(0x26E)
/* TMI SMC64 PIDs handled by the SPMD */
#define TMI_TMM_VERSION_REQ TMI_FID(SMC_64, TMI_FNUM_VERSION_REQ)
#define TMI_TMM_DATA_CREATE TMI_FID(SMC_64, TMI_FNUM_DATA_CREATE)
#define TMI_TMM_DATA_DESTROY TMI_FID(SMC_64, TMI_FNUM_DATA_DESTROY)
#define TMI_TMM_CVM_ACTIVATE TMI_FID(SMC_64, TMI_FNUM_CVM_ACTIVATE)
#define TMI_TMM_CVM_CREATE TMI_FID(SMC_64, TMI_FNUM_CVM_CREATE)
#define TMI_TMM_CVM_DESTROY TMI_FID(SMC_64, TMI_FNUM_CVM_DESTROY)
#define TMI_TMM_TEC_CREATE TMI_FID(SMC_64, TMI_FNUM_TEC_CREATE)
#define TMI_TMM_TEC_DESTROY TMI_FID(SMC_64, TMI_FNUM_TEC_DESTROY)
#define TMI_TMM_TEC_ENTER TMI_FID(SMC_64, TMI_FNUM_TEC_ENTER)
#define TMI_TMM_TTT_CREATE TMI_FID(SMC_64, TMI_FNUM_TTT_CREATE)
#define TMI_TMM_PSCI_COMPLETE TMI_FID(SMC_64, TMI_FNUM_PSCI_COMPLETE)
#define TMI_TMM_FEATURES TMI_FID(SMC_64, TMI_FNUM_FEATURES)
#define TMI_TMM_MEM_INFO_SHOW TMI_FID(SMC_64, TMI_FNUM_MEM_INFO_SHOW)
#define TMI_TMM_TTT_MAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_RANGE)
#define TMI_TMM_TTT_UNMAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_RANGE)
#define TMI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16)
#define TMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF)
#define TMI_ABI_VERSION_MAJOR U(0x1)
/* KVM_CAP_ARM_TMM on VM fd */
#define KVM_CAP_ARM_TMM_CONFIG_CVM_HOST 0
#define KVM_CAP_ARM_TMM_CREATE_RD 1
#define KVM_CAP_ARM_TMM_POPULATE_CVM 2
#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 3
#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0
#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1
#define KVM_CAP_ARM_TMM_RPV_SIZE 64
/* List of configuration items accepted for KVM_CAP_ARM_TMM_CONFIG_CVM_HOST */
#define KVM_CAP_ARM_TMM_CFG_RPV 0
#define KVM_CAP_ARM_TMM_CFG_HASH_ALGO 1
#define KVM_CAP_ARM_TMM_CFG_SVE 2
#define KVM_CAP_ARM_TMM_CFG_DBG 3
#define KVM_CAP_ARM_TMM_CFG_PMU 4
DECLARE_STATIC_KEY_FALSE(kvm_cvm_is_available);
DECLARE_STATIC_KEY_FALSE(kvm_cvm_is_enable);
struct kvm_cap_arm_tmm_config_item {
__u32 cfg;
union {
/* cfg == KVM_CAP_ARM_TMM_CFG_RPV */
struct {
__u8 rpv[KVM_CAP_ARM_TMM_RPV_SIZE];
};
/* cfg == KVM_CAP_ARM_TMM_CFG_HASH_ALGO */
struct {
__u32 hash_algo;
};
/* cfg == KVM_CAP_ARM_TMM_CFG_SVE */
struct {
__u32 sve_vq;
};
/* cfg == KVM_CAP_ARM_TMM_CFG_DBG */
struct {
__u32 num_brps;
__u32 num_wrps;
};
/* cfg == KVM_CAP_ARM_TMM_CFG_PMU */
struct {
__u32 num_pmu_cntrs;
};
/* Fix the size of the union */
__u8 reserved[256];
};
};
#define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0)
struct kvm_cap_arm_tmm_populate_region_args {
__u64 populate_ipa_base1;
__u64 populate_ipa_size1;
__u64 populate_ipa_base2;
__u64 populate_ipa_size2;
__u32 flags;
__u32 reserved[3];
};
static inline bool tmm_is_addr_ttt_level_aligned(uint64_t addr, int level)
{
uint64_t mask = (1 << (12 + 9 * (3 - level))) - 1;
return (addr & mask) == 0;
}
#define ID_AA64PFR0_SEL2_MASK ULL(0xf)
static inline bool is_armv8_4_sel2_present(void)
{
return ((read_sysreg(id_aa64pfr0_el1) >> ID_AA64PFR0_SEL2_SHIFT) &
ID_AA64PFR0_SEL2_MASK) == 1UL;
}
u64 tmi_version(void);
u64 tmi_data_create(u64 data, u64 rd, u64 map_addr, u64 src, u64 level);
u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level);
u64 tmi_cvm_activate(u64 rd);
u64 tmi_cvm_create(u64 params_ptr, u64 numa_set);
u64 tmi_cvm_destroy(u64 rd);
u64 tmi_tec_create(u64 numa_set, u64 rd, u64 mpidr, u64 params_ptr);
u64 tmi_tec_destroy(u64 tec);
u64 tmi_tec_enter(u64 tec, u64 run_ptr);
u64 tmi_ttt_create(u64 numa_set, u64 rd, u64 map_addr, u64 level);
u64 tmi_psci_complete(u64 calling_tec, u64 target_tec);
u64 tmi_features(u64 index);
u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node);
u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id);
u64 tmi_mem_info_show(u64 mem_info_addr);
void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_load_user_data(struct kvm *kvm, unsigned long arg);
unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu,
unsigned long target_affinity, unsigned long lowest_affinity_level);
int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu,
bool serror_pending, bool ext_dabt_pending);
int kvm_create_cvm_vm(struct kvm *kvm);
int kvm_init_cvm_vm(struct kvm *kvm);
#endif
#endif


@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2024, The Linux Foundation. All rights reserved.
*/
#ifndef __ASM_KVM_TMM_H
#define __ASM_KVM_TMM_H
#include <uapi/linux/kvm.h>
enum cvm_state {
CVM_STATE_NONE = 1,
CVM_STATE_NEW,
CVM_STATE_ACTIVE,
CVM_STATE_DYING
};
struct cvm {
enum cvm_state state;
u32 cvm_vmid;
u64 rd;
u64 loader_start;
u64 image_end;
u64 initrd_start;
u64 dtb_end;
u64 ram_size;
struct kvm_numa_info numa_info;
struct tmi_cvm_params *params;
bool is_cvm;
};
/*
* struct cvm_tec - Additional per VCPU data for a CVM
*/
struct cvm_tec {
u64 tec;
bool tec_created;
void *tec_run;
};
int kvm_init_tmm(void);
int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap);
void kvm_destroy_cvm(struct kvm *kvm);
int kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu);
void kvm_destroy_tec(struct kvm_vcpu *vcpu);
int kvm_tec_enter(struct kvm_vcpu *vcpu);
int handle_cvm_exit(struct kvm_vcpu *vcpu, int rec_run_status);
int kvm_arm_create_cvm(struct kvm *kvm);
void kvm_free_rd(struct kvm *kvm);
int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target);
void kvm_cvm_unmap_destroy_range(struct kvm *kvm);
#define CVM_TTT_BLOCK_LEVEL 2
#define CVM_TTT_MAX_LEVEL 3
#define CVM_PAGE_SHIFT 12
#define CVM_PAGE_SIZE BIT(CVM_PAGE_SHIFT)
#define CVM_TTT_LEVEL_SHIFT(l) \
((CVM_PAGE_SHIFT - 3) * (4 - (l)) + 3)
#define CVM_L2_BLOCK_SIZE BIT(CVM_TTT_LEVEL_SHIFT(2))
static inline unsigned long cvm_ttt_level_mapsize(int level)
{
if (WARN_ON(level > CVM_TTT_BLOCK_LEVEL))
return CVM_PAGE_SIZE;
return (1UL << CVM_TTT_LEVEL_SHIFT(level));
}
#endif


@ -4,6 +4,7 @@
#define _ASM_ARM64_SET_MEMORY_H
#include <asm-generic/set_memory.h>
#include <asm/cvm_guest.h>
bool can_set_direct_map(void);
#define can_set_direct_map can_set_direct_map


@ -0,0 +1,83 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_CVM_TSI_H_
#define __ASM_CVM_TSI_H_
#include <linux/ioctl.h>
#define TSI_MAGIC 'T'
/* Measurement slot reserved for RIM */
#define RIM_MEASUREMENT_SLOT (0U)
/* Maximum number of measurements */
#define MEASUREMENT_SLOT_NR (5U)
/* Size in bytes of the SHA256 measurement */
#define SHA256_SIZE (32U)
/* Size in bytes of the SHA512 measurement */
#define SHA512_SIZE (64U)
/*
* Size in bytes of the largest measurement type that can be supported.
* This macro needs to be updated accordingly if new algorithms are supported.
*/
#define MAX_MEASUREMENT_SIZE SHA512_SIZE
#define MAX_DEV_CERT_SIZE (4096U)
#define GRANULE_SIZE (4096U)
#define MAX_TOKEN_GRANULE_COUNT (2U)
#define CHALLENGE_SIZE (64U)
struct cvm_attester {
int dev_fd;
};
struct cvm_measurement {
int index;
unsigned char value[MAX_MEASUREMENT_SIZE];
};
struct cvm_tsi_version {
int major;
int minor;
};
struct cvm_config {
unsigned long ipa_bits; /* Width of IPA in bits */
unsigned long algorithm; /* Hash algorithm */
};
struct cvm_measurement_extend {
unsigned long index;
unsigned long size;
unsigned char value[MAX_MEASUREMENT_SIZE];
};
struct cvm_attestation_cmd {
unsigned char challenge[CHALLENGE_SIZE]; /* input: challenge value */
unsigned char token[GRANULE_SIZE * MAX_TOKEN_GRANULE_COUNT];
unsigned long token_size; /* return: token size */
};
struct cvm_token_granule {
void *head;
void *ipa; /* IPA of the Granule to which the token will be written */
unsigned long count;
unsigned long offset; /* Offset within Granule to start of buffer in bytes */
unsigned long size; /* Size of buffer in bytes */
unsigned long num_wr_bytes; /* Number of bytes written to buffer */
};
struct cca_device_cert {
unsigned long size;
unsigned char value[MAX_DEV_CERT_SIZE];
};
#define TMM_GET_TSI_VERSION _IOR(TSI_MAGIC, 0, struct cvm_tsi_version)
#define TMM_GET_ATTESTATION_TOKEN _IOWR(TSI_MAGIC, 1, struct cvm_attestation_cmd)
#define TMM_GET_DEVICE_CERT _IOR(TSI_MAGIC, 2, struct cca_device_cert)
#endif /* __ASM_CVM_TSI_H_ */
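
For illustration only, a userspace consumer of the ioctl ABI above
might look like the following sketch. It assumes the header is
installed as <asm/cvm_tsi.h> and that the misc device registered by
cvm_tsi.c appears as /dev/tsi; neither is guaranteed by this hunk.

/*
 * Hedged sketch (not part of the patch): query the TSI ABI version
 * through the "tsi" misc device. The /dev/tsi path and the installed
 * header location are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/cvm_tsi.h>	/* TMM_GET_TSI_VERSION, struct cvm_tsi_version */

int main(void)
{
	struct cvm_tsi_version ver = { 0 };
	int fd = open("/dev/tsi", O_RDWR | O_CLOEXEC);

	if (fd < 0) {
		perror("open /dev/tsi");
		return 1;
	}
	if (ioctl(fd, TMM_GET_TSI_VERSION, &ver)) {
		perror("TMM_GET_TSI_VERSION");
		close(fd);
		return 1;
	}
	printf("TSI ABI version %d.%d\n", ver.major, ver.minor);
	close(fd);
	return 0;
}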


@ -78,5 +78,6 @@
#define HWCAP2_ECV (1 << 19)
#define HWCAP2_AFP (1 << 20)
#define HWCAP2_RPRES (1 << 21)
#define HWCAP2_WFXT (1 << 23)
#endif /* _UAPI__ASM_HWCAP_H */


@ -106,6 +106,7 @@ struct kvm_regs {
#define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */
#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */
#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */
#define KVM_ARM_VCPU_TEC 8 /* VCPU TEC state as part of cvm */
struct kvm_vcpu_init {
__u32 target;
@ -354,6 +355,9 @@ struct kvm_vcpu_events {
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
#define KVM_DEV_ARM_ITS_CTRL_RESET 4
#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256 0
#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512 1
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
#define KVM_ARM_VCPU_PMU_V3_IRQ 0


@ -63,6 +63,7 @@ obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_ARM64_MPAM) += mpam.o
obj-$(CONFIG_ARM64_MTE) += mte.o
obj-$(CONFIG_CVM_GUEST) += cvm_guest.o cvm_tsi.o
obj-y += vdso/ probes/
obj-$(CONFIG_COMPAT_VDSO) += vdso32/


@ -211,6 +211,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_WFXT_SHIFT, 4, 0),
ARM64_FTR_END,
};
@ -2193,6 +2194,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.min_field_value = 1,
},
#endif
{
.desc = "WFx with timeout",
.capability = ARM64_HAS_WFXT,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.sys_reg = SYS_ID_AA64ISAR2_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64ISAR2_WFXT_SHIFT,
.matches = has_cpuid_feature,
.min_field_value = ID_AA64ISAR2_WFXT_SUPPORTED,
},
{},
};
@ -2315,6 +2326,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_WFXT_SHIFT, FTR_UNSIGNED, ID_AA64ISAR2_WFXT_SUPPORTED, CAP_HWCAP, KERNEL_HWCAP_WFXT),
{},
};


@ -117,6 +117,7 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_ECV] = "ecv",
[KERNEL_HWCAP_AFP] = "afp",
[KERNEL_HWCAP_RPRES] = "rpres",
[KERNEL_HWCAP_WFXT] = "wfxt",
};
#ifdef CONFIG_COMPAT


@ -0,0 +1,121 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/cvm_smc.h>
#define CVM_PTE_NS_BIT 5
#define CVM_PTE_NS_MASK (1 << CVM_PTE_NS_BIT)
static bool cvm_guest_enable __read_mostly;
DEFINE_STATIC_KEY_FALSE_RO(cvm_tsi_present);
/* please use 'cvm_guest=1' to enable cvm guest feature */
static int __init setup_cvm_guest(char *str)
{
int ret;
unsigned int val;
if (!str)
return 0;
ret = kstrtouint(str, 10, &val);
if (ret) {
pr_warn("Unable to parse cvm_guest.\n");
} else {
if (val)
cvm_guest_enable = true;
}
return ret;
}
early_param("cvm_guest", setup_cvm_guest);
static bool tsi_version_matches(void)
{
unsigned long ver = tsi_get_version();
if (ver == SMCCC_RET_NOT_SUPPORTED)
return false;
pr_info("RME: TSI version %lu.%lu advertised\n",
TSI_ABI_VERSION_GET_MAJOR(ver),
TSI_ABI_VERSION_GET_MINOR(ver));
return (ver >= TSI_ABI_VERSION &&
TSI_ABI_VERSION_GET_MAJOR(ver) == TSI_ABI_VERSION_MAJOR);
}
void __init cvm_tsi_init(void)
{
if (!cvm_guest_enable)
return;
if (!tsi_version_matches())
return;
static_branch_enable(&cvm_tsi_present);
}
bool is_cvm_world(void)
{
return cvm_guest_enable && static_branch_likely(&cvm_tsi_present);
}
static int change_page_range_cvm(pte_t *ptep, unsigned long addr, void *data)
{
bool encrypt = (bool)data;
pte_t pte = READ_ONCE(*ptep);
if (encrypt) {
if (!(pte.pte & CVM_PTE_NS_MASK))
return 0;
pte.pte = pte.pte & (~CVM_PTE_NS_MASK);
} else {
if (pte.pte & CVM_PTE_NS_MASK)
return 0;
/* Set NS BIT */
pte.pte = pte.pte | CVM_PTE_NS_MASK;
}
set_pte(ptep, pte);
return 0;
}
static int __change_memory_common_cvm(unsigned long start, unsigned long size, bool encrypt)
{
int ret;
ret = apply_to_page_range(&init_mm, start, size, change_page_range_cvm, (void *)encrypt);
flush_tlb_kernel_range(start, start + size);
return ret;
}
static int __set_memory_encrypted(unsigned long addr,
int numpages,
bool encrypt)
{
if (!is_cvm_world())
return 0;
WARN_ON(!__is_lm_address(addr));
return __change_memory_common_cvm(addr, PAGE_SIZE * numpages, encrypt);
}
int set_cvm_memory_encrypted(unsigned long addr, int numpages)
{
return __set_memory_encrypted(addr, numpages, true);
}
int set_cvm_memory_decrypted(unsigned long addr, int numpages)
{
return __set_memory_encrypted(addr, numpages, false);
}

arch/arm64/kernel/cvm_tsi.c (new file)

@ -0,0 +1,217 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/preempt.h>
#include <asm/cvm_guest.h>
#include <asm/cvm_smc.h>
#include <asm/cvm_tsi.h>
struct attestation_token {
void *buf;
unsigned long size;
};
static struct attestation_token token;
static DEFINE_MUTEX(token_lock);
static long tmm_tsi_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int tmm_get_tsi_version(struct cvm_tsi_version __user *arg);
static int tmm_get_attestation_token(struct cvm_attestation_cmd __user *arg);
static int tmm_get_device_cert(struct cca_device_cert __user *arg);
static const struct file_operations tmm_tsi_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tmm_tsi_ioctl
};
static struct miscdevice ioctl_dev = {
MISC_DYNAMIC_MINOR,
"tsi",
&tmm_tsi_fops,
};
static int __init tmm_tsi_init(void)
{
unsigned long ver;
int ret;
if (!is_cvm_world())
return -EIO;
ret = misc_register(&ioctl_dev);
if (ret) {
pr_err("tmm_tsi: misc device register failed (%d)!\n", ret);
return ret;
}
/* Allocate a large memory */
token.buf = kzalloc(GRANULE_SIZE * MAX_TOKEN_GRANULE_COUNT, GFP_KERNEL);
if (!token.buf)
return -ENOMEM;
pr_warn("tmm_tsi: module loaded (version %lu.%lu).\n",
TSI_ABI_VERSION_GET_MAJOR(ver),
TSI_ABI_VERSION_GET_MINOR(ver));
return 0;
}
static void __exit tmm_tsi_exit(void)
{
if (token.buf != NULL) {
memset(token.buf, 0, GRANULE_SIZE * MAX_TOKEN_GRANULE_COUNT);
kfree(token.buf);
}
misc_deregister(&ioctl_dev);
pr_warn("tmm_tsi: module unloaded.\n");
}
static long tmm_tsi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret;
switch (cmd) {
case TMM_GET_TSI_VERSION:
ret = tmm_get_tsi_version((struct cvm_tsi_version *)arg);
break;
case TMM_GET_ATTESTATION_TOKEN:
ret = tmm_get_attestation_token((struct cvm_attestation_cmd *)arg);
break;
case TMM_GET_DEVICE_CERT:
ret = tmm_get_device_cert((struct cca_device_cert *)arg);
break;
default:
pr_err("tmm_tsi: unknown ioctl command (0x%x)!\n", cmd);
return -ENOTTY;
}
return ret;
}
static int tmm_get_tsi_version(struct cvm_tsi_version __user *arg)
{
struct cvm_tsi_version ver_measured = {0};
unsigned long ver;
unsigned long ret;
ver = tsi_get_version();
ver_measured.major = TSI_ABI_VERSION_GET_MAJOR(ver);
ver_measured.minor = TSI_ABI_VERSION_GET_MINOR(ver);
ret = copy_to_user(arg, &ver_measured, sizeof(struct cvm_tsi_version));
if (ret) {
pr_err("tmm_tsi: copy data to user failed (%lu)!\n", ret);
return -EFAULT;
}
return 0;
}
static int tmm_get_attestation_token(struct cvm_attestation_cmd __user *arg)
{
unsigned long ret;
struct cvm_token_granule token_granule = {0};
unsigned char challenge[CHALLENGE_SIZE];
ret = copy_from_user(challenge, &(arg->challenge), CHALLENGE_SIZE);
if (ret) {
pr_err("tmm_tsi: copy challenge from user failed (%lu)!\n", ret);
return -EFAULT;
}
mutex_lock(&token_lock);
token_granule.head = token.buf;
token_granule.ipa = token_granule.head;
ret = tsi_attestation_token_init(challenge);
if (ret) {
pr_err("tmm_tsi: tsi call tsi_attestation_token_init failed (%lu)!\n", ret);
mutex_unlock(&token_lock);
return -EIO;
}
do { /* Retrieve one Granule of data per loop iteration */
if (token_granule.count + 1 > MAX_TOKEN_GRANULE_COUNT) {
pr_err("tmm_tsi: macro MAX_TOKEN_GRANULE_COUNT (%d) is too small!\n",
MAX_TOKEN_GRANULE_COUNT);
mutex_unlock(&token_lock);
return -ENOMEM;
}
token_granule.ipa = token_granule.head + (token_granule.count * GRANULE_SIZE);
token_granule.offset = 0;
do { /* Retrieve sub-Granule chunk of data per loop iteration */
token_granule.size = GRANULE_SIZE - token_granule.offset;
ret = tsi_attestation_token_continue(&token_granule);
token_granule.offset += token_granule.num_wr_bytes;
} while (ret == TSI_INCOMPLETE && token_granule.offset < GRANULE_SIZE);
token_granule.count++;
} while (ret == TSI_INCOMPLETE);
/* Send to user space the total size of the token */
token.size = (GRANULE_SIZE * (token_granule.count - 1)) + token_granule.offset;
ret = copy_to_user(&(arg->token_size), &(token.size), sizeof(token.size));
if (ret) {
pr_err("tmm_tsi: copy token_size to user failed (%lu)!\n", ret);
mutex_unlock(&token_lock);
return -EFAULT;
}
ret = copy_to_user(arg->token, token.buf, token.size);
if (ret) {
pr_err("tmm_tsi: copy token to user failed (%lu)!\n", ret);
mutex_unlock(&token_lock);
return -EFAULT;
}
mutex_unlock(&token_lock);
return 0;
}
static int tmm_get_device_cert(struct cca_device_cert __user *arg)
{
unsigned long ret;
unsigned char *device_cert;
unsigned long device_cert_size;
device_cert_size = MAX_DEV_CERT_SIZE;
device_cert = kzalloc(device_cert_size, GFP_KERNEL);
if (!device_cert)
return -ENOMEM;
ret = tsi_get_device_cert(device_cert, &device_cert_size);
if (ret != TSI_SUCCESS) {
pr_err("tmm_tsi: tsi call tsi_get_device_cert failed (%lu)!\n", ret);
kfree(device_cert);
return -EIO;
}
ret = copy_to_user(arg->value, device_cert, device_cert_size);
if (ret) {
pr_err("tmm_tsi: copy data to user failed (%lu)!\n", ret);
kfree(device_cert);
return -EFAULT;
}
kfree(device_cert);
ret = copy_to_user(&(arg->size), &device_cert_size, sizeof(device_cert_size));
if (ret) {
pr_err("tmm_tsi: copy data to user failed (%lu)!\n", ret);
return -EFAULT;
}
return 0;
}
module_init(tmm_tsi_init);
module_exit(tmm_tsi_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("HUAWEI TECHNOLOGIES CO., LTD.");
MODULE_DESCRIPTION("Interacting with TMM through TSI interface from user space.");


@ -41,6 +41,7 @@
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/cvm_guest.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@ -306,6 +307,9 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
jump_label_init();
parse_early_param();
/* Init TSI after jump_labels are active */
cvm_tsi_init();
/*
* Unmask asynchronous aborts and fiq after bringing up possible
* earlycon. (Report possible System Errors once we can report this


@ -64,6 +64,13 @@ config KVM_ARM_PMU
Adds support for a virtual Performance Monitoring Unit (PMU) in
virtual machines.
config CVM_HOST
bool "Enable cvm host feature"
help
Support CVM based on S-EL2
If unsure, say N.
endif # KVM
endif # VIRTUALIZATION


@ -25,6 +25,10 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
vgic/vgic-its.o vgic/vgic-debug.o
kvm-$(CONFIG_CVM_HOST) += tmi.o
kvm-$(CONFIG_CVM_HOST) += cvm.o
kvm-$(CONFIG_CVM_HOST) += cvm_exit.o
kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
kvm-$(CONFIG_VIRT_PLAT_DEV) += vgic/shadow_dev.o


@ -16,6 +16,10 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmi.h>
#endif
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
@ -125,10 +129,80 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
}
}
#ifdef CONFIG_CVM_HOST
static bool cvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
return timer_ctx &&
((timer_get_ctl(timer_ctx) &
(ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}
void kvm_cvm_timers_update(struct kvm_vcpu *vcpu)
{
int i;
u64 cval, now;
bool status, level;
struct arch_timer_context *timer;
struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu;
for (i = 0; i < NR_KVM_TIMERS; i++) {
timer = &arch_timer->timers[i];
if (!timer->loaded) {
if (!cvm_timer_irq_can_fire(timer))
continue;
cval = timer_get_cval(timer);
now = kvm_phys_timer_read() - timer_get_offset(timer);
level = (cval <= now);
kvm_timer_update_irq(vcpu, level, timer);
} else {
status = timer_get_ctl(timer) & ARCH_TIMER_CTRL_IT_STAT;
level = cvm_timer_irq_can_fire(timer) && status;
if (level != timer->irq.level)
kvm_timer_update_irq(vcpu, level, timer);
}
}
}
static void set_cvm_timers_loaded(struct kvm_vcpu *vcpu, bool loaded)
{
int i;
struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu;
for (i = 0; i < NR_KVM_TIMERS; i++) {
struct arch_timer_context *timer = &arch_timer->timers[i];
timer->loaded = loaded;
}
}
static void kvm_timer_blocking(struct kvm_vcpu *vcpu);
static void kvm_timer_unblocking(struct kvm_vcpu *vcpu);
static inline void cvm_vcpu_load_timer_callback(struct kvm_vcpu *vcpu)
{
kvm_cvm_timers_update(vcpu);
kvm_timer_unblocking(vcpu);
set_cvm_timers_loaded(vcpu, true);
}
static inline void cvm_vcpu_put_timer_callback(struct kvm_vcpu *vcpu)
{
set_cvm_timers_loaded(vcpu, false);
if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)))
kvm_timer_blocking(vcpu);
}
#endif
static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
#ifdef CONFIG_CVM_HOST
if (kvm_is_cvm(vcpu->kvm))
return;
#endif
switch(arch_timer_ctx_index(ctxt)) {
case TIMER_VTIMER:
__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
@ -207,18 +281,16 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
u64 val)
{
u64 cval, now;
u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);
cval = timer_get_cval(timer_ctx);
now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);
if (now < cval) {
if (now < val) {
u64 ns;
ns = cyclecounter_cyc2ns(timecounter->cc,
cval - now,
val - now,
timecounter->mask,
&timecounter->frac);
return ns;
@ -227,6 +299,11 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
return 0;
}
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
}
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
WARN_ON(timer_ctx && timer_ctx->loaded);
@ -235,6 +312,20 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
(ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}
static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
(vcpu->arch.flags & KVM_ARM64_WFIT));
}
static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
return kvm_counter_compute_delta(ctx, val);
}
/*
* Returns the earliest expiration time in ns among guest timers.
* Note that it will return 0 if none of timers can fire.
@ -252,6 +343,9 @@ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
}
if (vcpu_has_wfit_active(vcpu))
min_delta = min(min_delta, wfit_delay_ns(vcpu));
/* If none of timers can fire, then return 0 */
if (min_delta == ULLONG_MAX)
return 0;
@ -349,15 +443,9 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
return cval <= now;
}
bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
struct timer_map map;
get_timer_map(vcpu, &map);
return kvm_timer_should_fire(map.direct_vtimer) ||
kvm_timer_should_fire(map.direct_ptimer) ||
kvm_timer_should_fire(map.emul_ptimer);
return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}
/*
@ -483,7 +571,8 @@ static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
*/
if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
!kvm_timer_irq_can_fire(map.direct_ptimer) &&
!kvm_timer_irq_can_fire(map.emul_ptimer))
!kvm_timer_irq_can_fire(map.emul_ptimer) &&
!vcpu_has_wfit_active(vcpu))
return;
/*
@ -602,6 +691,13 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu)) {
cvm_vcpu_load_timer_callback(vcpu);
return;
}
#endif
if (unlikely(!timer->enabled))
return;
@ -650,6 +746,13 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
struct timer_map map;
struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu)) {
cvm_vcpu_put_timer_callback(vcpu);
return;
}
#endif
if (unlikely(!timer->enabled))
return;
@ -775,7 +878,12 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
ptimer->vcpu = vcpu;
/* Synchronize cntvoff across all vtimers of a VM. */
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
#ifdef CONFIG_CVM_HOST
if (kvm_is_cvm(vcpu->kvm))
update_vtimer_cntvoff(vcpu, 0);
else
#endif
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
timer_set_offset(ptimer, 0);
hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
@ -1154,6 +1262,15 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
#ifdef CONFIG_CVM_HOST
/*
* We don't use mapped IRQs for CVM because the TMI doesn't allow
* us setting the LR.HW bit in the VGIC.
*/
if (vcpu_is_tec(vcpu))
return 0;
#endif
get_timer_map(vcpu, &map);
ret = kvm_vgic_map_phys_irq(vcpu,


@ -38,6 +38,10 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sections.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmi.h>
#include <linux/perf/arm_pmu.h>
#endif
#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
@ -109,6 +113,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = 0;
kvm->arch.return_nisv_io_abort_to_user = true;
break;
#ifdef CONFIG_CVM_HOST
case KVM_CAP_ARM_TMM:
if (static_branch_unlikely(&kvm_cvm_is_available))
r = kvm_cvm_enable_cap(kvm, cap);
break;
#endif
default:
r = -EINVAL;
break;
@ -150,14 +160,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return ret;
#endif
#ifdef CONFIG_CVM_HOST
if (kvm_arm_cvm_type(type)) {
ret = kvm_create_cvm_vm(kvm);
if (ret)
return ret;
}
#endif
ret = kvm_arm_setup_stage2(kvm, type);
if (ret)
#ifdef CONFIG_CVM_HOST
goto out_free_cvm;
#else
return ret;
#endif
ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
if (ret)
#ifdef CONFIG_CVM_HOST
goto out_free_cvm;
#else
return ret;
#endif
ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
if (ret)
goto out_free_stage2_pgd;
@ -169,9 +194,23 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
set_default_csv2(kvm);
#ifdef CONFIG_CVM_HOST
if (kvm_arm_cvm_type(type)) {
ret = kvm_init_cvm_vm(kvm);
if (ret)
goto out_free_stage2_pgd;
}
#endif
return ret;
out_free_stage2_pgd:
kvm_free_stage2_pgd(&kvm->arch.mmu);
#ifdef CONFIG_CVM_HOST
out_free_cvm:
kfree(kvm->arch.cvm);
kvm->arch.cvm = NULL;
#endif
return ret;
}
@ -204,6 +243,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
}
}
atomic_set(&kvm->online_vcpus, 0);
#ifdef CONFIG_CVM_HOST
if (kvm_is_cvm(kvm))
kvm_destroy_cvm(kvm);
#endif
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@ -259,12 +302,26 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
case KVM_CAP_STEAL_TIME:
r = kvm_arm_pvtime_supported();
#ifdef CONFIG_CVM_HOST
if (kvm && kvm_is_cvm(kvm))
r = 0;
else
#endif
r = kvm_arm_pvtime_supported();
break;
#ifdef CONFIG_VIRT_PLAT_DEV
case KVM_CAP_ARM_VIRT_MSI_BYPASS:
r = sdev_enable;
break;
#endif
#ifdef CONFIG_CVM_HOST
case KVM_CAP_ARM_TMM:
if (!is_armv8_4_sel2_present()) {
r = -ENXIO;
break;
}
r = static_key_enabled(&kvm_cvm_is_available);
break;
#endif
default:
r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
@ -358,7 +415,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (err)
return err;
#endif
return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
}
@ -382,11 +438,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
#endif
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return kvm_timer_is_pending(vcpu);
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
/*
@ -436,8 +487,23 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->cpu = cpu;
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu)) {
if (single_task_running())
vcpu_clear_wfx_traps(vcpu);
else
vcpu_set_wfx_traps(vcpu);
}
#endif
kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu)) {
if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
return;
}
#endif
if (has_vhe())
kvm_vcpu_load_sysregs_vhe(vcpu);
kvm_arch_vcpu_load_fp(vcpu);
@ -460,6 +526,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu)) {
kvm_cvm_vcpu_put(vcpu);
return;
}
#endif
kvm_arch_vcpu_put_fp(vcpu);
if (has_vhe())
kvm_vcpu_put_sysregs_vhe(vcpu);
@ -641,6 +713,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Tell the rest of the code that there are userspace irqchip
* VMs in the wild.
*/
#ifdef CONFIG_CVM_HOST
if (!kvm_is_cvm(kvm))
#endif
static_branch_inc(&userspace_irqchip_in_use);
}
@ -778,6 +853,17 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
xfer_to_guest_mode_work_pending();
}
#ifdef CONFIG_CVM_HOST
static inline void update_pmu_phys_irq(struct kvm_vcpu *vcpu, bool *pmu_stopped)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
if (pmu->irq_level) {
*pmu_stopped = true;
arm_pmu_set_phys_irq(false);
}
}
#endif
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
@ -800,7 +886,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
ret = kvm_vcpu_first_run_init(vcpu);
if (ret)
return ret;
if (run->exit_reason == KVM_EXIT_MMIO) {
ret = kvm_handle_mmio_return(vcpu);
if (ret)
@ -817,6 +902,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
ret = 1;
run->exit_reason = KVM_EXIT_UNKNOWN;
while (ret > 0) {
#ifdef CONFIG_CVM_HOST
bool pmu_stopped = false;
#endif
/*
* Check conditions before entering the guest
*/
@ -836,6 +924,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
preempt_disable();
kvm_pmu_flush_hwstate(vcpu);
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu))
update_pmu_phys_irq(vcpu, &pmu_stopped);
#endif
local_irq_disable();
@ -875,8 +967,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
trace_kvm_entry(*vcpu_pc(vcpu));
guest_enter_irqoff();
ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu))
ret = kvm_tec_enter(vcpu);
else
#endif
ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
@ -931,12 +1027,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* guest time.
*/
guest_exit();
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, ret);
#ifdef CONFIG_CVM_HOST
if (!vcpu_is_tec(vcpu)) {
#endif
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, ret);
#ifdef CONFIG_CVM_HOST
}
#endif
preempt_enable();
#ifdef CONFIG_CVM_HOST
if (pmu_stopped)
arm_pmu_set_phys_irq(true);
#endif
/*
* The ARMv8 architecture doesn't give the hypervisor
@ -956,8 +1061,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu->arch.target = -1;
ret = ARM_EXCEPTION_IL;
}
ret = handle_exit(vcpu, ret);
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu))
ret = handle_cvm_exit(vcpu, ret);
else
#endif
ret = handle_exit(vcpu, ret);
}
/* Tell userspace about in-kernel device output levels */
@ -1386,6 +1495,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
switch (ioctl) {
#ifdef CONFIG_CVM_HOST
case KVM_LOAD_USER_DATA: {
return kvm_load_user_data(kvm, arg);
}
#endif
case KVM_CREATE_IRQCHIP: {
int ret;
if (!vgic_present)
@ -1933,7 +2047,13 @@ int kvm_arch_init(void *opaque)
kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
return -ENODEV;
}
#ifdef CONFIG_CVM_HOST
if (static_branch_unlikely(&kvm_cvm_is_enable) && in_hyp_mode) {
err = kvm_init_tmm();
if (err)
return err;
}
#endif
if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
cpus_have_final_cap(ARM64_WORKAROUND_1508412))
kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \

arch/arm64/kvm/cvm.c (new file)

@ -0,0 +1,909 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2024, The Linux Foundation. All rights reserved.
*/
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <asm/kvm_tmi.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/stage2_pgtable.h>
#include <linux/arm-smccc.h>
#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>
/* Protects access to cvm_vmid_bitmap */
static DEFINE_SPINLOCK(cvm_vmid_lock);
static unsigned long *cvm_vmid_bitmap;
DEFINE_STATIC_KEY_FALSE(kvm_cvm_is_available);
DEFINE_STATIC_KEY_FALSE(kvm_cvm_is_enable);
#define SIMD_PAGE_SIZE 0x3000
static int __init setup_cvm_host(char *str)
{
int ret;
unsigned int val;
if (!str)
return 0;
ret = kstrtouint(str, 10, &val);
if (ret) {
pr_warn("Unable to parse cvm_guest.\n");
} else {
if (val)
static_branch_enable(&kvm_cvm_is_enable);
}
return ret;
}
early_param("cvm_host", setup_cvm_host);
static int cvm_vmid_init(void)
{
unsigned int vmid_count = 1 << kvm_get_vmid_bits();
cvm_vmid_bitmap = bitmap_zalloc(vmid_count, GFP_KERNEL);
if (!cvm_vmid_bitmap) {
kvm_err("%s: Couldn't allocate cvm vmid bitmap\n", __func__);
return -ENOMEM;
}
return 0;
}
static unsigned long tmm_feat_reg0;
static bool tmm_supports(unsigned long feature)
{
return !!u64_get_bits(tmm_feat_reg0, feature);
}
bool kvm_cvm_supports_sve(void)
{
return tmm_supports(TMI_FEATURE_REGISTER_0_SVE_EN);
}
bool kvm_cvm_supports_pmu(void)
{
return tmm_supports(TMI_FEATURE_REGISTER_0_PMU_EN);
}
u32 kvm_cvm_ipa_limit(void)
{
return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_S2SZ);
}
u32 kvm_cvm_get_num_brps(void)
{
return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_BPS);
}
u32 kvm_cvm_get_num_wrps(void)
{
return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_WPS);
}
static int cvm_vmid_reserve(void)
{
int ret;
unsigned int vmid_count = 1 << kvm_get_vmid_bits();
spin_lock(&cvm_vmid_lock);
ret = bitmap_find_free_region(cvm_vmid_bitmap, vmid_count, 0);
spin_unlock(&cvm_vmid_lock);
return ret;
}
static void cvm_vmid_release(unsigned int vmid)
{
spin_lock(&cvm_vmid_lock);
bitmap_release_region(cvm_vmid_bitmap, vmid, 0);
spin_unlock(&cvm_vmid_lock);
}
static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
u64 shift = ARM64_HW_PGTABLE_LEVEL_SHIFT(pgt->start_level - 1);
u64 mask = BIT(pgt->ia_bits) - 1;
return (addr & mask) >> shift;
}
static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
{
struct kvm_pgtable pgt = {
.ia_bits = ia_bits,
.start_level = start_level,
};
return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}
/*
* the configurable physical numa range in QEMU is 0-127,
* but in real scenarios, 0-63 is sufficient.
*/
static u64 kvm_get_host_numa_set_by_vcpu(u64 vcpu, struct kvm *kvm)
{
int64_t i;
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
struct kvm_numa_info *numa_info = &cvm->numa_info;
for (i = 0; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) {
if (test_bit(vcpu, (unsigned long *)numa_info->numa_nodes[i].cpu_id))
return numa_info->numa_nodes[i].host_numa_nodes[0];
}
return NO_NUMA;
}
static u64 kvm_get_first_binded_numa_set(struct kvm *kvm)
{
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
struct kvm_numa_info *numa_info = &cvm->numa_info;
if (numa_info->numa_cnt > 0)
return numa_info->numa_nodes[0].host_numa_nodes[0];
return NO_NUMA;
}
int kvm_arm_create_cvm(struct kvm *kvm)
{
int ret;
struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
unsigned int pgd_sz;
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
/* get affine host numa set by default vcpu 0 */
u64 numa_set = kvm_get_host_numa_set_by_vcpu(0, kvm);
if (!kvm_is_cvm(kvm) || kvm_cvm_state(kvm) != CVM_STATE_NONE)
return 0;
if (!cvm->params) {
ret = -EFAULT;
goto out;
}
ret = cvm_vmid_reserve();
if (ret < 0)
goto out;
cvm->cvm_vmid = ret;
pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level);
cvm->params->ttt_level_start = kvm->arch.mmu.pgt->start_level;
cvm->params->ttt_num_start = pgd_sz;
cvm->params->s2sz = VTCR_EL2_IPA(kvm->arch.vtcr);
cvm->params->vmid = cvm->cvm_vmid;
cvm->params->ns_vtcr = kvm->arch.vtcr;
cvm->params->vttbr_el2 = kvm->arch.mmu.pgd_phys;
memcpy(cvm->params->rpv, &cvm->cvm_vmid, sizeof(cvm->cvm_vmid));
cvm->rd = tmi_cvm_create(__pa(cvm->params), numa_set);
if (!cvm->rd) {
kvm_err("KVM creates cVM failed: %d\n", cvm->cvm_vmid);
ret = -ENOMEM;
goto out;
}
WRITE_ONCE(cvm->state, CVM_STATE_NEW);
ret = 0;
out:
kfree(cvm->params);
cvm->params = NULL;
if (ret < 0) {
kfree(cvm);
kvm->arch.cvm = NULL;
}
return ret;
}
void kvm_destroy_cvm(struct kvm *kvm)
{
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
uint32_t cvm_vmid;
if (!cvm)
return;
cvm_vmid = cvm->cvm_vmid;
kfree(cvm->params);
cvm->params = NULL;
if (kvm_cvm_state(kvm) == CVM_STATE_NONE)
return;
cvm_vmid_release(cvm_vmid);
WRITE_ONCE(cvm->state, CVM_STATE_DYING);
if (!tmi_cvm_destroy(cvm->rd))
kvm_info("KVM has destroyed cVM: %d\n", cvm->cvm_vmid);
kfree(kvm->arch.cvm);
kvm->arch.cvm = NULL;
}
static int kvm_cvm_ttt_create(struct cvm *cvm,
unsigned long addr,
int level,
u64 numa_set)
{
addr = ALIGN_DOWN(addr, cvm_ttt_level_mapsize(level - 1));
return tmi_ttt_create(numa_set, cvm->rd, addr, level);
}
int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct cvm *cvm,
unsigned long ipa,
int level,
int max_level,
struct kvm_mmu_memory_cache *mc)
{
int ret = 0;
if (WARN_ON(level == max_level))
return 0;
while (level++ < max_level) {
u64 numa_set = kvm_get_first_binded_numa_set(kvm);
ret = kvm_cvm_ttt_create(cvm, ipa, level, numa_set);
if (ret)
return -ENXIO;
}
return 0;
}
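/*
 * Delegate one data granule to the cVM; on a TTT walk fault, create the
 * missing table levels and retry tmi_data_create().
 */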
static int kvm_cvm_create_protected_data_page(struct kvm *kvm, struct cvm *cvm,
unsigned long ipa, int level, struct page *src_page, u64 numa_set)
{
phys_addr_t src_phys = 0;
int ret;
if (src_page)
src_phys = page_to_phys(src_page);
ret = tmi_data_create(numa_set, cvm->rd, ipa, src_phys, level);
if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) {
/* Create missing RTTs and retry */
int level_fault = TMI_RETURN_INDEX(ret);
ret = kvm_cvm_create_ttt_levels(kvm, cvm, ipa, level_fault,
level, NULL);
if (ret)
goto err;
ret = tmi_data_create(numa_set, cvm->rd, ipa, src_phys, level);
}
if (ret)
goto err;
return 0;
err:
kvm_err("Cvm create protected data page fail:%d\n", ret);
return ret;
}
static u64 cvm_granule_size(u32 level)
{
return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
}
static bool is_data_create_region(phys_addr_t ipa_base,
struct kvm_cap_arm_tmm_populate_region_args *args)
{
if ((ipa_base >= args->populate_ipa_base1 &&
ipa_base < args->populate_ipa_base1 + args->populate_ipa_size1) ||
(ipa_base >= args->populate_ipa_base2 &&
ipa_base < args->populate_ipa_base2 + args->populate_ipa_size2))
return true;
return false;
}
int kvm_cvm_populate_par_region(struct kvm *kvm, u64 numa_set,
phys_addr_t ipa_base, phys_addr_t ipa_end,
struct kvm_cap_arm_tmm_populate_region_args *args)
{
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
struct kvm_memory_slot *memslot;
gfn_t base_gfn, end_gfn;
int idx;
phys_addr_t ipa;
int ret = 0;
int level = TMM_TTT_LEVEL_3;
unsigned long map_size = cvm_granule_size(level);
base_gfn = gpa_to_gfn(ipa_base);
end_gfn = gpa_to_gfn(ipa_end);
idx = srcu_read_lock(&kvm->srcu);
memslot = gfn_to_memslot(kvm, base_gfn);
if (!memslot) {
ret = -EFAULT;
goto out;
}
/* We require the region to be contained within a single memslot */
if (memslot->base_gfn + memslot->npages < end_gfn) {
ret = -EINVAL;
goto out;
}
mmap_read_lock(current->mm);
ipa = ipa_base;
while (ipa < ipa_end) {
struct page *page = NULL;
kvm_pfn_t pfn = 0;
/*
* FIXME: This causes over mapping, but there's no good
* solution here with the ABI as it stands
*/
ipa = ALIGN_DOWN(ipa, map_size);
if (is_data_create_region(ipa, args)) {
pfn = gfn_to_pfn_memslot(memslot, gpa_to_gfn(ipa));
if (is_error_pfn(pfn)) {
ret = -EFAULT;
break;
}
page = pfn_to_page(pfn);
}
ret = kvm_cvm_create_protected_data_page(kvm, cvm, ipa, level, page, numa_set);
if (ret)
goto err_release_pfn;
ipa += map_size;
if (pfn)
kvm_release_pfn_dirty(pfn);
err_release_pfn:
if (ret) {
if (pfn)
kvm_release_pfn_clean(pfn);
break;
}
}
mmap_read_unlock(current->mm);
out:
srcu_read_unlock(&kvm->srcu, idx);
return ret;
}
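/*
 * Allocate the shared tec_run page and ask the TMM to create a TEC for
 * this vcpu, seeded with its initial GPRs, PC and runnable state.
 */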
static int kvm_create_tec(struct kvm_vcpu *vcpu)
{
int ret = 0;
int i;
u64 numa_set;
struct tmi_tec_params *params_ptr = NULL;
struct user_pt_regs *vcpu_regs = vcpu_gp_regs(vcpu);
u64 mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
tec->tec_run = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
if (!tec->tec_run) {
ret = -ENOMEM;
goto tec_free;
}
params_ptr = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
if (!params_ptr) {
ret = -ENOMEM;
goto tec_free;
}
for (i = 0; i < TEC_CREATE_NR_GPRS; ++i)
params_ptr->gprs[i] = vcpu_regs->regs[i];
params_ptr->pc = vcpu_regs->pc;
if (vcpu->vcpu_id == 0)
params_ptr->flags = TMI_RUNNABLE;
else
params_ptr->flags = TMI_NOT_RUNNABLE;
params_ptr->ram_size = cvm->ram_size;
numa_set = kvm_get_host_numa_set_by_vcpu(vcpu->vcpu_id, vcpu->kvm);
tec->tec = tmi_tec_create(numa_set, cvm->rd, mpidr, __pa(params_ptr));
tec->tec_created = true;
kfree(params_ptr);
return ret;
tec_free:
kfree(tec->tec_run);
kfree(tec);
kfree(params_ptr);
vcpu->arch.tec = NULL;
return ret;
}
int kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu)
{
int ret = 0;
mutex_lock(&vcpu->kvm->lock);
if (!vcpu->arch.tec) {
vcpu->arch.tec = kzalloc(sizeof(struct cvm_tec), GFP_KERNEL_ACCOUNT);
if (!vcpu->arch.tec) {
ret = -ENOMEM;
goto out;
}
}
ret = kvm_create_tec(vcpu);
out:
mutex_unlock(&vcpu->kvm->lock);
return ret;
}
static int config_cvm_hash_algo(struct tmi_cvm_params *params,
struct kvm_cap_arm_tmm_config_item *cfg)
{
switch (cfg->hash_algo) {
case KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256:
if (!tmm_supports(TMI_FEATURE_REGISTER_0_HASH_SHA_256))
return -EINVAL;
break;
case KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512:
if (!tmm_supports(TMI_FEATURE_REGISTER_0_HASH_SHA_512))
return -EINVAL;
break;
default:
return -EINVAL;
}
params->measurement_algo = cfg->hash_algo;
return 0;
}
static int config_cvm_sve(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg)
{
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
struct tmi_cvm_params *params;
int max_sve_vq;
params = cvm->params;
max_sve_vq = u64_get_bits(tmm_feat_reg0,
TMI_FEATURE_REGISTER_0_SVE_VL);
if (!kvm_cvm_supports_sve())
return -EINVAL;
if (cfg->sve_vq > max_sve_vq)
return -EINVAL;
params->sve_vl = cfg->sve_vq;
params->flags |= TMI_CVM_PARAM_FLAG_SVE;
return 0;
}
static int config_cvm_pmu(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg)
{
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
struct tmi_cvm_params *params;
int max_pmu_num_ctrs;
params = cvm->params;
max_pmu_num_ctrs = u64_get_bits(tmm_feat_reg0,
TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS);
if (!kvm_cvm_supports_pmu())
return -EINVAL;
if (cfg->num_pmu_cntrs > max_pmu_num_ctrs)
return -EINVAL;
params->pmu_num_cnts = cfg->num_pmu_cntrs;
params->flags |= TMI_CVM_PARAM_FLAG_PMU;
return 0;
}
static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap)
{
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
struct kvm_cap_arm_tmm_config_item cfg;
int r = 0;
if (kvm_cvm_state(kvm) != CVM_STATE_NONE)
return -EBUSY;
if (copy_from_user(&cfg, (void __user *)cap->args[1], sizeof(cfg)))
return -EFAULT;
switch (cfg.cfg) {
case KVM_CAP_ARM_TMM_CFG_SVE:
r = config_cvm_sve(kvm, &cfg);
break;
case KVM_CAP_ARM_TMM_CFG_PMU:
r = config_cvm_pmu(kvm, &cfg);
break;
case KVM_CAP_ARM_TMM_CFG_HASH_ALGO:
r = config_cvm_hash_algo(cvm->params, &cfg);
break;
default:
r = -EINVAL;
}
return r;
}
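/*
 * Map the NUMA-bound IPA regions that were not populated (everything above
 * dtb_end in node 0, plus the other nodes) into the cVM stage-2 via the TMM.
 */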
static int kvm_cvm_map_range(struct kvm *kvm)
{
int ret = 0;
u64 curr_numa_set;
int idx;
u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2);
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
struct kvm_numa_info *numa_info = &cvm->numa_info;
gpa_t gpa;
curr_numa_set = kvm_get_first_binded_numa_set(kvm);
gpa = round_up(cvm->dtb_end, l2_granule);
for (idx = 0; idx < numa_info->numa_cnt; idx++) {
struct kvm_numa_node *numa_node = &numa_info->numa_nodes[idx];
if (idx)
gpa = numa_node->ipa_start;
if (gpa >= numa_node->ipa_start &&
gpa < numa_node->ipa_start + numa_node->ipa_size) {
ret = tmi_ttt_map_range(cvm->rd, gpa,
numa_node->ipa_size - gpa + numa_node->ipa_start,
curr_numa_set, numa_node->host_numa_nodes[0]);
if (ret) {
kvm_err("tmi_ttt_map_range failed: %d.\n", ret);
return ret;
}
}
}
return ret;
}
static int kvm_activate_cvm(struct kvm *kvm)
{
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
if (kvm_cvm_state(kvm) != CVM_STATE_NEW)
return -EINVAL;
if (kvm_cvm_map_range(kvm))
return -EFAULT;
if (tmi_cvm_activate(cvm->rd)) {
kvm_err("tmi_cvm_activate failed!\n");
return -ENXIO;
}
WRITE_ONCE(cvm->state, CVM_STATE_ACTIVE);
kvm_info("cVM%d is activated!\n", cvm->cvm_vmid);
return 0;
}
static int kvm_populate_ram_region(struct kvm *kvm, u64 map_size,
phys_addr_t ipa_base, phys_addr_t ipa_end,
struct kvm_cap_arm_tmm_populate_region_args *args)
{
phys_addr_t gpa;
u64 numa_set = kvm_get_first_binded_numa_set(kvm);
for (gpa = ipa_base; gpa < ipa_end; gpa += map_size) {
if (kvm_cvm_populate_par_region(kvm, numa_set, gpa, gpa + map_size, args)) {
kvm_err("kvm_cvm_populate_par_region failed: %d\n", -EFAULT);
return -EFAULT;
}
}
return 0;
}
static int kvm_populate_ipa_cvm_range(struct kvm *kvm,
struct kvm_cap_arm_tmm_populate_region_args *args)
{
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2);
phys_addr_t ipa_base1, ipa_end2;
if (kvm_cvm_state(kvm) != CVM_STATE_NEW)
return -EINVAL;
if (!IS_ALIGNED(args->populate_ipa_base1, PAGE_SIZE) ||
!IS_ALIGNED(args->populate_ipa_size1, PAGE_SIZE) ||
!IS_ALIGNED(args->populate_ipa_base2, PAGE_SIZE) ||
!IS_ALIGNED(args->populate_ipa_size2, PAGE_SIZE))
return -EINVAL;
if (args->populate_ipa_base1 < cvm->loader_start ||
args->populate_ipa_base2 < args->populate_ipa_base1 + args->populate_ipa_size1 ||
cvm->dtb_end < args->populate_ipa_base2 + args->populate_ipa_size2)
return -EINVAL;
if (args->flags & ~TMI_MEASURE_CONTENT)
return -EINVAL;
ipa_base1 = round_down(args->populate_ipa_base1, l2_granule);
ipa_end2 = round_up(args->populate_ipa_base2 + args->populate_ipa_size2, l2_granule);
return kvm_populate_ram_region(kvm, l2_granule, ipa_base1, ipa_end2, args);
}
int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
int r = 0;
mutex_lock(&kvm->lock);
switch (cap->args[0]) {
case KVM_CAP_ARM_TMM_CONFIG_CVM_HOST:
r = kvm_tmm_config_cvm(kvm, cap);
break;
case KVM_CAP_ARM_TMM_CREATE_RD:
r = kvm_arm_create_cvm(kvm);
break;
case KVM_CAP_ARM_TMM_POPULATE_CVM: {
struct kvm_cap_arm_tmm_populate_region_args args;
void __user *argp = u64_to_user_ptr(cap->args[1]);
if (copy_from_user(&args, argp, sizeof(args))) {
r = -EFAULT;
break;
}
r = kvm_populate_ipa_cvm_range(kvm, &args);
break;
}
case KVM_CAP_ARM_TMM_ACTIVATE_CVM:
r = kvm_activate_cvm(kvm);
break;
default:
r = -EINVAL;
break;
}
mutex_unlock(&kvm->lock);
return r;
}
void kvm_destroy_tec(struct kvm_vcpu *vcpu)
{
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
if (!vcpu_is_tec(vcpu))
return;
if (tmi_tec_destroy(tec->tec) != 0)
kvm_err("%s vcpu id : %d failed!\n", __func__, vcpu->vcpu_id);
tec->tec = 0;
kfree(tec->tec_run);
kfree(tec);
vcpu->arch.tec = NULL;
}
static int tmi_check_version(void)
{
u64 res;
int version_major;
int version_minor;
res = tmi_version();
if (res == SMCCC_RET_NOT_SUPPORTED)
return -ENXIO;
version_major = TMI_ABI_VERSION_GET_MAJOR(res);
version_minor = TMI_ABI_VERSION_GET_MINOR(res);
if (version_major != TMI_ABI_VERSION_MAJOR) {
kvm_err("Unsupported TMI_ABI (version %d %d)\n", version_major,
version_minor);
return -ENXIO;
}
kvm_info("TMI ABI version %d,%d\n", version_major, version_minor);
return 0;
}
int kvm_tec_enter(struct kvm_vcpu *vcpu)
{
struct tmi_tec_run *run;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm;
if (READ_ONCE(cvm->state) != CVM_STATE_ACTIVE)
return -EINVAL;
run = tec->tec_run;
/* set/clear TWI TWE flags */
if (vcpu->arch.hcr_el2 & HCR_TWI)
run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFI;
else
run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFI;
if (vcpu->arch.hcr_el2 & HCR_TWE)
run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFE;
else
run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFE;
return tmi_tec_enter(tec->tec, __pa(run));
}
int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target)
{
int ret;
struct cvm_tec *calling_tec = (struct cvm_tec *)calling->arch.tec;
struct cvm_tec *target_tec = (struct cvm_tec *)target->arch.tec;
ret = tmi_psci_complete(calling_tec->tec, target_tec->tec);
if (ret)
return -EINVAL;
return 0;
}
int kvm_init_tmm(void)
{
int ret;
if (PAGE_SIZE != SZ_4K)
return 0;
if (tmi_check_version())
return 0;
ret = cvm_vmid_init();
if (ret)
return ret;
tmm_feat_reg0 = tmi_features(0);
kvm_info("TMM feature0: 0x%lx\n", tmm_feat_reg0);
static_branch_enable(&kvm_cvm_is_available);
return 0;
}
static bool is_numa_ipa_range_valid(struct kvm_numa_info *numa_info)
{
unsigned long i;
struct kvm_numa_node *numa_node, *prev_numa_node;
prev_numa_node = NULL;
for (i = 0; i < numa_info->numa_cnt; i++) {
numa_node = &numa_info->numa_nodes[i];
if (numa_node->ipa_start + numa_node->ipa_size < numa_node->ipa_start)
return false;
if (prev_numa_node &&
numa_node->ipa_start < prev_numa_node->ipa_start + prev_numa_node->ipa_size)
return false;
prev_numa_node = numa_node;
}
if (numa_node->ipa_start + numa_node->ipa_size > CVM_IPA_MAX_VAL)
return false;
return true;
}
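/*
 * KVM_LOAD_USER_DATA: validate and record the guest image layout and the
 * NUMA binding supplied by the VMM.
 */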
int kvm_load_user_data(struct kvm *kvm, unsigned long arg)
{
struct kvm_user_data user_data;
void __user *argp = (void __user *)arg;
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
struct kvm_numa_info *numa_info;
if (!kvm_is_cvm(kvm))
return -EFAULT;
if (copy_from_user(&user_data, argp, sizeof(user_data)))
return -EINVAL;
numa_info = &user_data.numa_info;
if (numa_info->numa_cnt > MAX_NUMA_NODE)
return -EINVAL;
if (numa_info->numa_cnt > 0) {
unsigned long i, total_size = 0;
struct kvm_numa_node *numa_node = &numa_info->numa_nodes[0];
unsigned long ipa_end = numa_node->ipa_start + numa_node->ipa_size;
if (!is_numa_ipa_range_valid(numa_info))
return -EINVAL;
if (user_data.loader_start < numa_node->ipa_start ||
user_data.dtb_end > ipa_end)
return -EINVAL;
for (i = 0; i < numa_info->numa_cnt; i++)
total_size += numa_info->numa_nodes[i].ipa_size;
if (total_size != user_data.ram_size)
return -EINVAL;
}
if (user_data.image_end <= user_data.loader_start ||
user_data.initrd_start < user_data.image_end ||
user_data.dtb_end < user_data.initrd_start ||
user_data.ram_size < user_data.dtb_end - user_data.loader_start)
return -EINVAL;
cvm->loader_start = user_data.loader_start;
cvm->image_end = user_data.image_end;
cvm->initrd_start = user_data.initrd_start;
cvm->dtb_end = user_data.dtb_end;
cvm->ram_size = user_data.ram_size;
memcpy(&cvm->numa_info, numa_info, sizeof(struct kvm_numa_info));
return 0;
}
void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu)
{
kvm_timer_vcpu_put(vcpu);
kvm_vgic_put(vcpu);
vcpu->cpu = -1;
}
unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu,
unsigned long target_affinity, unsigned long lowest_affinity_level)
{
struct kvm_vcpu *target_vcpu;
if (lowest_affinity_level != 0)
return PSCI_RET_INVALID_PARAMS;
target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, target_affinity);
if (!target_vcpu)
return PSCI_RET_INVALID_PARAMS;
cvm_psci_complete(vcpu, target_vcpu);
return PSCI_RET_SUCCESS;
}
int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu,
bool serror_pending, bool ext_dabt_pending)
{
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
if (serror_pending)
return -EINVAL;
if (ext_dabt_pending) {
if (!(((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags &
TEC_ENTRY_FLAG_EMUL_MMIO))
return -EINVAL;
((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags
&= ~TEC_ENTRY_FLAG_EMUL_MMIO;
((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags
|= TEC_ENTRY_FLAG_INJECT_SEA;
}
return 0;
}
int kvm_create_cvm_vm(struct kvm *kvm)
{
struct cvm *cvm;
if (!static_key_enabled(&kvm_cvm_is_available))
return -EFAULT;
if (kvm->arch.cvm) {
kvm_info("cvm already create.\n");
return 0;
}
kvm->arch.cvm = kzalloc(sizeof(struct cvm), GFP_KERNEL_ACCOUNT);
if (!kvm->arch.cvm)
return -ENOMEM;
cvm = (struct cvm *)kvm->arch.cvm;
cvm->is_cvm = true;
return 0;
}
int kvm_init_cvm_vm(struct kvm *kvm)
{
struct tmi_cvm_params *params;
struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
params = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
if (!params)
return -ENOMEM;
cvm->params = params;
WRITE_ONCE(cvm->state, CVM_STATE_NONE);
return 0;
}
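
For orientation, the sketch below shows one plausible VMM-side call sequence against the interfaces defined above. It is illustrative only and not part of the patch: the constant names (KVM_CAP_ARM_TMM, KVM_LOAD_USER_DATA, the KVM_CAP_ARM_TMM_* sub-commands, KVM_ARM_VCPU_TEC) come from the uapi/header hunks elsewhere in this series, the helper and function names are hypothetical, and error handling is omitted.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: every cVM control operation goes through
 * KVM_ENABLE_CAP on the VM fd, with args[0] selecting the sub-command and
 * args[1] carrying a pointer-sized payload (see kvm_cvm_enable_cap() above).
 */
static int tmm_enable_cap(int vm_fd, uint64_t subcmd, uint64_t arg)
{
	struct kvm_enable_cap cap = {
		.cap  = KVM_CAP_ARM_TMM,
		.args = { subcmd, arg },
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

static int launch_cvm(int kvm_fd, struct kvm_user_data *data,
		      struct kvm_cap_arm_tmm_populate_region_args *populate)
{
	/* Bit 8 of the machine type (KVM_TYPE_CVM_BIT) selects a cVM. */
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 1UL << 8);

	/* RAM layout and NUMA binding must be known before the RD is created. */
	ioctl(vm_fd, KVM_LOAD_USER_DATA, data);

	/*
	 * Optional KVM_CAP_ARM_TMM_CONFIG_CVM_HOST calls (SVE/PMU/hash algo)
	 * would go here, while the cVM is still in CVM_STATE_NONE.
	 */
	tmm_enable_cap(vm_fd, KVM_CAP_ARM_TMM_CREATE_RD, 0);

	/* Create the vcpus and finalize each one with KVM_ARM_VCPU_TEC here. */

	tmm_enable_cap(vm_fd, KVM_CAP_ARM_TMM_POPULATE_CVM,
		       (uint64_t)(uintptr_t)populate);
	return tmm_enable_cap(vm_fd, KVM_CAP_ARM_TMM_ACTIVATE_CVM, 0);
}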

arch/arm64/kvm/cvm_exit.c (new file, 239 lines)

@ -0,0 +1,239 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2024, The Linux Foundation. All rights reserved.
*/
#include <linux/kvm_host.h>
#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>
#include <asm/kvm_tmi.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_coproc.h>
typedef int (*exit_handler_fn)(struct kvm_vcpu *vcpu);
static void update_arch_timer_irq_lines(struct kvm_vcpu *vcpu, bool unmask_ctl)
{
struct tmi_tec_run *run;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
run = tec->tec_run;
__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = run->tec_exit.cntv_ctl;
__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = run->tec_exit.cntv_cval;
__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = run->tec_exit.cntp_ctl;
__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = run->tec_exit.cntp_cval;
/*
 * Because the timer mask is tainted by the TMM, we don't know the
 * true intent of the guest. Here we assume the mask is always
 * cleared during WFI.
 */
if (unmask_ctl) {
__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK;
__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK;
}
kvm_cvm_timers_update(vcpu);
}
static int tec_exit_reason_notimpl(struct kvm_vcpu *vcpu)
{
struct tmi_tec_run *run;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
run = tec->tec_run;
pr_err("[vcpu %d] Unhandled exit reason from cvm (ESR: %#llx)\n",
vcpu->vcpu_id, run->tec_exit.esr);
return -ENXIO;
}
/*
 * This mirrors kvm_handle_wfx(), minus the tracing and the PC update;
 * the logic is duplicated here to avoid changing kvm_handle_wfx() itself.
 */
static int tec_exit_wfx(struct kvm_vcpu *vcpu)
{
u64 esr = kvm_vcpu_get_esr(vcpu);
if (esr & ESR_ELx_WFx_ISS_WFE)
vcpu->stat.wfe_exit_stat++;
else
vcpu->stat.wfi_exit_stat++;
if (esr & ESR_ELx_WFx_ISS_WFxT) {
if (esr & ESR_ELx_WFx_ISS_RV) {
u64 val, now;
now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
if (now >= val)
goto out;
} else {
/* Treat WFxT as WFx if RN is invalid */
esr &= ~ESR_ELx_WFx_ISS_WFxT;
}
}
if (esr & ESR_ELx_WFx_ISS_WFE) {
kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
} else {
if (esr & ESR_ELx_WFx_ISS_WFxT)
vcpu->arch.flags |= KVM_ARM64_WFIT;
kvm_vcpu_block(vcpu);
vcpu->arch.flags &= ~KVM_ARM64_WFIT;
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
out:
return 1;
}
static int tec_exit_sys_reg(struct kvm_vcpu *vcpu)
{
int ret;
struct tmi_tec_run *run;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
unsigned long esr = kvm_vcpu_get_esr(vcpu);
int rt = kvm_vcpu_sys_get_rt(vcpu);
bool is_write = !(esr & 1);
run = tec->tec_run;
if (is_write)
vcpu_set_reg(vcpu, rt, run->tec_exit.gprs[0]);
ret = kvm_handle_sys_reg(vcpu);
if (ret >= 0 && !is_write)
run->tec_entry.gprs[0] = vcpu_get_reg(vcpu, rt);
return ret;
}
static int tec_exit_sync_dabt(struct kvm_vcpu *vcpu)
{
struct tmi_tec_run *run;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
run = tec->tec_run;
if (kvm_vcpu_dabt_iswrite(vcpu) && kvm_vcpu_dabt_isvalid(vcpu)) {
vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu),
run->tec_exit.gprs[0]);
}
return kvm_handle_guest_abort(vcpu);
}
static int tec_exit_sync_iabt(struct kvm_vcpu *vcpu)
{
struct tmi_tec_run *run;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
run = tec->tec_run;
pr_err("[vcpu %d] Unhandled instruction abort (ESR: %#llx).\n",
vcpu->vcpu_id, run->tec_exit.esr);
return -ENXIO;
}
static exit_handler_fn tec_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = tec_exit_reason_notimpl,
[ESR_ELx_EC_WFx] = tec_exit_wfx,
[ESR_ELx_EC_SYS64] = tec_exit_sys_reg,
[ESR_ELx_EC_DABT_LOW] = tec_exit_sync_dabt,
[ESR_ELx_EC_IABT_LOW] = tec_exit_sync_iabt
};
static int tec_exit_psci(struct kvm_vcpu *vcpu)
{
int i;
struct tmi_tec_run *run;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
run = tec->tec_run;
for (i = 0; i < TEC_EXIT_NR_GPRS; ++i)
vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]);
return kvm_psci_call(vcpu);
}
static int tec_exit_host_call(struct kvm_vcpu *vcpu)
{
int ret, i;
struct tmi_tec_run *run;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
run = tec->tec_run;
vcpu->stat.hvc_exit_stat++;
for (i = 0; i < TEC_EXIT_NR_GPRS; ++i)
vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]);
ret = kvm_hvc_call_handler(vcpu);
if (ret < 0) {
vcpu_set_reg(vcpu, 0, ~0UL);
ret = 1;
}
for (i = 0; i < TEC_EXIT_NR_GPRS; ++i)
run->tec_entry.gprs[i] = vcpu_get_reg(vcpu, i);
return ret;
}
/*
 * Return > 0 to return to the guest, < 0 on error, and 0 (with
 * run->exit_reason set) for a proper exit to userspace.
 */
int handle_cvm_exit(struct kvm_vcpu *vcpu, int tec_run_ret)
{
unsigned long status;
struct tmi_tec_run *run;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
u8 esr_ec;
bool is_wfx;
run = tec->tec_run;
esr_ec = ESR_ELx_EC(run->tec_exit.esr);
status = TMI_RETURN_STATUS(tec_run_ret);
if (status == TMI_ERROR_CVM_POWEROFF) {
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN;
return 0;
}
if (status == TMI_ERROR_CVM_STATE) {
vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
return 0;
}
if (tec_run_ret)
return -ENXIO;
vcpu->arch.fault.esr_el2 = run->tec_exit.esr;
vcpu->arch.fault.far_el2 = run->tec_exit.far;
vcpu->arch.fault.hpfar_el2 = run->tec_exit.hpfar;
is_wfx = (run->tec_exit.exit_reason == TMI_EXIT_SYNC) && (esr_ec == ESR_ELx_EC_WFx);
update_arch_timer_irq_lines(vcpu, is_wfx);
run->tec_entry.flags = 0;
switch (run->tec_exit.exit_reason) {
case TMI_EXIT_FIQ:
case TMI_EXIT_IRQ:
return 1;
case TMI_EXIT_PSCI:
return tec_exit_psci(vcpu);
case TMI_EXIT_SYNC:
return tec_exit_handlers[esr_ec](vcpu);
case TMI_EXIT_HOST_CALL:
return tec_exit_host_call(vcpu);
}
kvm_pr_unimpl("Unsupported exit reason : 0x%llx\n",
run->tec_exit.exit_reason);
return 0;
}
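
As a reading aid, a minimal sketch of how the return convention above is consumed; the real call site is the vcpu run loop (not part of this hunk), and the function name below is hypothetical.

#include <linux/kvm_host.h>
#include <asm/kvm_tmi.h>

/* Illustrative only: run one TEC entry/exit cycle for a cVM vcpu. */
static int cvm_vcpu_run_once(struct kvm_vcpu *vcpu)
{
	int ret = kvm_tec_enter(vcpu);	/* raw status from tmi_tec_enter() */

	if (ret < 0)
		return ret;

	/* > 0: re-enter the guest, 0: return to userspace, < 0: error. */
	return handle_cvm_exit(vcpu, ret);
}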

arch/arm64/kvm/guest.c

@ -27,6 +27,10 @@
#include <asm/kvm_coproc.h>
#include <asm/sigcontext.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmi.h>
#endif
#include "trace.h"
struct kvm_stats_debugfs_item debugfs_entries[] = {
@ -783,6 +787,10 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
bool has_esr = events->exception.serror_has_esr;
bool ext_dabt_pending = events->exception.ext_dabt_pending;
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu))
return kvm_cvm_vcpu_set_events(vcpu, serror_pending, ext_dabt_pending);
#endif
if (serror_pending && has_esr) {
if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
return -EINVAL;

arch/arm64/kvm/handle_exit.c

@ -81,25 +81,53 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
*
* @vcpu: the vcpu pointer
*
* WFE: Yield the CPU and come back to this vcpu when the scheduler
* WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
* decides to.
* WFI: Simply call kvm_vcpu_block(), which will halt execution of
* world-switches and schedule other host processes until there is an
* incoming IRQ or FIQ to the VM.
* WFIT: Same as WFI, with a timed wakeup implemented as a background timer
*
* WF{I,E}T can immediately return if the deadline has already expired.
*/
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
u64 esr = kvm_vcpu_get_esr(vcpu);
if (esr & ESR_ELx_WFx_ISS_WFE) {
trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
vcpu->stat.wfe_exit_stat++;
kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
} else {
trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
vcpu->stat.wfi_exit_stat++;
}
if (esr & ESR_ELx_WFx_ISS_WFxT) {
if (esr & ESR_ELx_WFx_ISS_RV) {
u64 val, now;
now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
if (now >= val)
goto out;
} else {
/* Treat WFxT as WFx if RN is invalid */
esr &= ~ESR_ELx_WFx_ISS_WFxT;
}
}
if (esr & ESR_ELx_WFx_ISS_WFE) {
kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
} else {
if (esr & ESR_ELx_WFx_ISS_WFxT)
vcpu->arch.flags |= KVM_ARM64_WFIT;
kvm_vcpu_block(vcpu);
vcpu->arch.flags &= ~KVM_ARM64_WFIT;
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
out:
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;

arch/arm64/kvm/hisi_virt.c

@ -7,6 +7,9 @@
#include <linux/of.h>
#include <linux/init.h>
#include <linux/kvm_host.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmi.h>
#endif
#include "hisi_virt.h"
static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE;
@ -157,6 +160,10 @@ static void hardware_disable_dvmbm(void *data)
bool hisi_dvmbm_supported(void)
{
#ifdef CONFIG_CVM_HOST
if (static_branch_unlikely(&kvm_cvm_is_enable))
return false;
#endif
if (cpu_type != HI_IP09)
return false;

arch/arm64/kvm/mmio.c

@ -8,6 +8,10 @@
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmi.h>
#endif
#include "trace.h"
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
@ -109,6 +113,14 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
&data);
data = vcpu_data_host_to_guest(vcpu, data, len);
vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu)) {
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
((struct tmi_tec_run *)tec->tec_run)->
tec_entry.gprs[0] = data;
}
#endif
}
/*
@ -177,7 +189,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
vcpu->mmio_needed = 1;
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu)) {
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags |=
TEC_ENTRY_FLAG_EMUL_MMIO;
}
#endif
if (!ret) {
/* We handled the access successfully in the kernel. */
if (!is_write)

arch/arm64/kvm/mmu.c

@ -476,6 +476,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
spin_lock(&kvm->mmu_lock);
pgt = mmu->pgt;
if (pgt) {
mmu->pgd_phys = 0;
mmu->pgt = NULL;
@ -753,6 +754,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
write_fault = kvm_is_write_fault(vcpu);
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu)) {
write_fault = true;
prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
}
#endif
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
VM_BUG_ON(write_fault && exec_fault);

arch/arm64/kvm/pmu-emul.c

@ -13,6 +13,7 @@
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_tmi.h>
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
@ -369,6 +370,17 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
u64 reg = 0;
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu)) {
struct tmi_tec_run *run;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
run = tec->tec_run;
reg = run->tec_exit.pmu_ovf_status;
return reg;
}
#endif
if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

arch/arm64/kvm/psci.c

@ -16,6 +16,9 @@
#include <kvm/arm_psci.h>
#include <kvm/arm_hypercalls.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmi.h>
#endif
/*
* This is an implementation of the Power State Coordination Interface
* as described in ARM document number ARM DEN 0022A.
@ -78,6 +81,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu))
cvm_psci_complete(source_vcpu, vcpu);
#endif
if (!vcpu->arch.power_off) {
if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
@ -133,7 +140,10 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
/* Ignore other bits of target affinity */
target_affinity &= target_affinity_mask;
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu))
return cvm_psci_vcpu_affinity_info(vcpu, target_affinity, lowest_affinity_level);
#endif
/*
* If one or more VCPU matching target affinity are running
* then ON else OFF

arch/arm64/kvm/reset.c

@ -30,6 +30,9 @@
#include <asm/kvm_mmu.h>
#include <asm/virt.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmi.h>
#endif
/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;
@ -183,6 +186,12 @@ int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
return -EPERM;
return kvm_vcpu_finalize_sve(vcpu);
#ifdef CONFIG_CVM_HOST
case KVM_ARM_VCPU_TEC:
if (!kvm_is_cvm(vcpu->kvm))
return -EINVAL;
return kvm_finalize_vcpu_tec(vcpu);
#endif
}
return -EINVAL;
@ -199,6 +208,9 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
kfree(vcpu->arch.sve_state);
#ifdef CONFIG_CVM_HOST
kvm_destroy_tec(vcpu);
#endif
}
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
@ -431,7 +443,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
u32 parange, phys_shift;
u8 lvls;
#ifdef CONFIG_CVM_HOST
if ((type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) && (!kvm_is_cvm(kvm)))
#else
if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
#endif
return -EINVAL;
phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);

arch/arm64/kvm/sys_regs.c

@ -1234,6 +1234,9 @@ static u64 read_id_reg(struct kvm_vcpu *vcpu,
(0xfUL << ID_AA64ISAR1_API_SHIFT) |
(0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
(0xfUL << ID_AA64ISAR1_GPI_SHIFT));
} else if (id == SYS_ID_AA64ISAR2_EL1) {
if (!cpus_have_final_cap(ARM64_HAS_WFXT))
val &= ~(0xfUL << ID_AA64ISAR2_WFXT_SHIFT);
} else if (id == SYS_ID_AA64DFR0_EL1) {
/* Limit guests to PMUv3 for ARMv8.1 */
val = cpuid_feature_cap_perfmon_field(val,

arch/arm64/kvm/tmi.c (new file, 129 lines)

@ -0,0 +1,129 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2024, The Linux Foundation. All rights reserved.
*/
#include <linux/arm-smccc.h>
#include <asm/kvm_tmi.h>
#include <asm/memory.h>
u64 tmi_version(void)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_VERSION_REQ, &res);
return res.a1;
}
u64 tmi_data_create(u64 numa_set, u64 rd, u64 map_addr, u64 src, u64 level)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_DATA_CREATE, numa_set, rd, map_addr, src, level, &res);
return res.a1;
}
u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_DATA_DESTROY, rd, map_addr, level, &res);
return res.a1;
}
u64 tmi_cvm_activate(u64 rd)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_CVM_ACTIVATE, rd, &res);
return res.a1;
}
u64 tmi_cvm_create(u64 params_ptr, u64 numa_set)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_CVM_CREATE, params_ptr, numa_set, &res);
return res.a1;
}
u64 tmi_cvm_destroy(u64 rd)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_CVM_DESTROY, rd, &res);
return res.a1;
}
u64 tmi_tec_create(u64 numa_set, u64 rd, u64 mpidr, u64 params_ptr)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_TEC_CREATE, numa_set, rd, mpidr, params_ptr, &res);
return res.a1;
}
u64 tmi_tec_destroy(u64 tec)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_TEC_DESTROY, tec, &res);
return res.a1;
}
u64 tmi_tec_enter(u64 tec, u64 run_ptr)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_TEC_ENTER, tec, run_ptr, &res);
return res.a1;
}
u64 tmi_ttt_create(u64 numa_set, u64 rd, u64 map_addr, u64 level)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_TTT_CREATE, numa_set, rd, map_addr, level, &res);
return res.a1;
}
u64 tmi_psci_complete(u64 calling_tec, u64 target_tec)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_PSCI_COMPLETE, calling_tec, target_tec, &res);
return res.a1;
}
u64 tmi_features(u64 index)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_FEATURES, index, &res);
return res.a1;
}
u64 tmi_mem_info_show(u64 mem_info_addr)
{
struct arm_smccc_res res;
u64 pa_addr = __pa(mem_info_addr);
arm_smccc_1_1_smc(TMI_TMM_MEM_INFO_SHOW, pa_addr, &res);
return res.a1;
}
EXPORT_SYMBOL_GPL(tmi_mem_info_show);
u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_TTT_MAP_RANGE, rd, map_addr, size, cur_node, target_node, &res);
return res.a1;
}
u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_RANGE, rd, map_addr, size, node_id, &res);
return res.a1;
}

arch/arm64/kvm/vgic/vgic-v3.c

@ -10,6 +10,10 @@
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmi.h>
#endif
#include "vgic.h"
static bool group0_trap;
@ -673,7 +677,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
kvm_vgic_global_state.vcpu_base = 0;
} else {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.can_emulate_gicv2 = true;
#ifdef CONFIG_CVM_HOST
if (!static_branch_unlikely(&kvm_cvm_is_available))
#endif
kvm_vgic_global_state.can_emulate_gicv2 = true;
ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
if (ret) {
kvm_err("Cannot register GICv2 KVM device.\n");
@ -735,6 +742,15 @@ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu)) {
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
cpu_if->vgic_vmcr =
((struct tmi_tec_run *)tec->tec_run)->tec_exit.gicv3_vmcr;
return;
}
#endif
if (likely(cpu_if->vgic_sre))
cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}

arch/arm64/kvm/vgic/vgic.c

@ -12,6 +12,10 @@
#include <asm/kvm_hyp.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmi.h>
#endif
#include "vgic.h"
#define CREATE_TRACE_POINTS
@ -849,12 +853,48 @@ static inline bool can_access_vgic_from_kernel(void)
return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}
#ifdef CONFIG_CVM_HOST
static inline void vgic_tmm_save_state(struct kvm_vcpu *vcpu)
{
int i;
struct tmi_tec_run *tec_run;
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
tec_run = tec->tec_run;
for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) {
cpu_if->vgic_lr[i] = tec_run->tec_exit.gicv3_lrs[i];
tec_run->tec_entry.gicv3_lrs[i] = 0;
}
}
static inline void vgic_tmm_restore_state(struct kvm_vcpu *vcpu)
{
int i;
struct tmi_tec_run *tec_run;
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
tec_run = tec->tec_run;
for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) {
tec_run->tec_entry.gicv3_lrs[i] = cpu_if->vgic_lr[i];
tec_run->tec_exit.gicv3_lrs[i] = cpu_if->vgic_lr[i];
}
}
#endif
static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_save_state(vcpu);
else
__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu))
vgic_tmm_save_state(vcpu);
else
#endif
__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
/* Sync back the hardware VGIC state into our emulation after a guest's run. */
@ -884,7 +924,12 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_restore_state(vcpu);
else
__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu))
vgic_tmm_restore_state(vcpu);
else
#endif
__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
/* Flush our emulation state into the GIC hardware before entering the guest. */
@ -925,7 +970,10 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
if (unlikely(!vgic_initialized(vcpu->kvm)))
return;
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu))
return;
#endif
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_load(vcpu);
else
@ -936,7 +984,10 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
if (unlikely(!vgic_initialized(vcpu->kvm)))
return;
#ifdef CONFIG_CVM_HOST
if (vcpu_is_tec(vcpu))
return;
#endif
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_put(vcpu);
else

arch/arm64/lib/delay.c

@ -27,7 +27,17 @@ void __delay(unsigned long cycles)
{
cycles_t start = get_cycles();
if (arch_timer_evtstrm_available()) {
if (cpus_have_const_cap(ARM64_HAS_WFXT)) {
u64 end = start + cycles;
/*
* Start with WFIT. If an interrupt makes us resume
* early, use a WFET loop to complete the delay.
*/
wfit(end);
while ((get_cycles() - start) < cycles)
wfet(end);
} else if (arch_timer_evtstrm_available()) {
const cycles_t timer_evt_period =
USECS_TO_CYCLES(ARCH_TIMER_EVT_STREAM_PERIOD_US);

arch/arm64/mm/init.c

@ -43,6 +43,7 @@
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
#include <asm/set_memory.h>
/*
* We need to be able to catch inadvertent references to memstart_addr
@ -631,6 +632,8 @@ void __init mem_init(void)
else
swiotlb_force = SWIOTLB_NO_FORCE;
swiotlb_cvm_update_mem_attributes();
set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
#ifndef CONFIG_SPARSEMEM_VMEMMAP

arch/arm64/mm/mmu.c

@ -40,6 +40,7 @@
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/set_memory.h>
u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
EXPORT_SYMBOL(idmap_t0sz);
@ -663,7 +664,7 @@ static void __init map_mem(pgd_t *pgdp)
early_kfence_pool = arm64_kfence_alloc_pool();
if (!can_set_block_and_cont_map() ||
(split_disabled && can_set_direct_map()))
(split_disabled && can_set_direct_map()) || is_cvm_world())
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*
@ -1958,7 +1959,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
* it is possible to protect/unprotect single pages in the KFENCE pool.
*/
if (!can_set_block_and_cont_map() ||
(split_disabled && can_set_direct_map()))
(split_disabled && can_set_direct_map()) || is_cvm_world())
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),

arch/arm64/mm/pageattr.c

@ -218,6 +218,9 @@ int set_direct_map_default_noflush(struct page *page)
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (is_cvm_world())
return;
if (!can_set_direct_map())
return;

drivers/irqchip/irq-gic-v3-its.c

@ -29,6 +29,10 @@
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#ifdef CONFIG_CVM_GUEST
#include <linux/swiotlb.h>
#include <asm/cvm_guest.h>
#endif
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
@ -299,6 +303,91 @@ static DEFINE_IDA(its_vpeid_ida);
extern bool ft2000_iommu_hwfix;
#endif
#ifdef CONFIG_CVM_GUEST
static struct device cvm_alloc_device;
static LIST_HEAD(cvm_its_nodes);
static raw_spinlock_t cvm_its_lock;
struct its_device_order {
struct its_device *dev;
struct list_head entry;
int itt_order;
};
static inline struct page *its_alloc_shared_pages_node(int node, gfp_t gfp,
unsigned int order)
{
return swiotlb_alloc(&cvm_alloc_device, (1 << order) * PAGE_SIZE);
}
static inline struct page *its_alloc_shared_pages(gfp_t gfp, unsigned int order)
{
return its_alloc_shared_pages_node(NUMA_NO_NODE, gfp, order);
}
static void its_free_shared_pages(void *addr, int order)
{
if (order < 0)
return;
swiotlb_free(&cvm_alloc_device, (struct page *)addr, (1 << order) * PAGE_SIZE);
}
static int add_its_device_order(struct its_device *dev, int itt_order)
{
struct its_device_order *new;
unsigned long flags;
new = kmalloc(sizeof(struct its_device_order), GFP_KERNEL);
if (!new)
return -ENOMEM;
new->dev = dev;
new->itt_order = itt_order;
raw_spin_lock_irqsave(&cvm_its_lock, flags);
list_add_tail(&new->entry, &cvm_its_nodes);
raw_spin_unlock_irqrestore(&cvm_its_lock, flags);
return 0;
}
/* Look up the ITT order recorded for this device and drop its bookkeeping entry. */
static int get_its_device_order(struct its_device *dev)
{
struct its_device_order *pos, *tmp;
unsigned long flags;
int itt_order = -1;
raw_spin_lock_irqsave(&cvm_its_lock, flags);
list_for_each_entry_safe(pos, tmp, &cvm_its_nodes, entry) {
if (pos->dev == dev) {
itt_order = pos->itt_order;
list_del(&pos->entry);
kfree(pos);
goto found;
}
}
found:
raw_spin_unlock_irqrestore(&cvm_its_lock, flags);
return itt_order;
}
static void *its_alloc_shared_page_address(struct its_device *dev,
struct its_node *its, int sz)
{
struct page *page;
int itt_order;
itt_order = get_order(sz);
if (add_its_device_order(dev, itt_order))
return NULL;
page = its_alloc_shared_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
itt_order);
if (!page)
return NULL;
return (void *)page_address(page);
}
#endif
#ifdef CONFIG_VIRT_PLAT_DEV
static void free_devid_to_rsv_pools(struct its_device *its_dev)
{
@ -2373,7 +2462,13 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
struct page *prop_page;
prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
prop_page = its_alloc_shared_pages(gfp_flags,
get_order(LPI_PROPBASE_SZ));
else
#endif
prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
if (!prop_page)
return NULL;
@ -2384,8 +2479,14 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
static void its_free_prop_table(struct page *prop_page)
{
free_pages((unsigned long)page_address(prop_page),
get_order(LPI_PROPBASE_SZ));
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
its_free_shared_pages(page_address(prop_page),
get_order(LPI_PROPBASE_SZ));
else
#endif
free_pages((unsigned long)page_address(prop_page),
get_order(LPI_PROPBASE_SZ));
}
static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
@ -2507,7 +2608,13 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
order = get_order(GITS_BASER_PAGES_MAX * psz);
}
page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
page = its_alloc_shared_pages_node(its->numa_node,
GFP_KERNEL | __GFP_ZERO, order);
else
#endif
page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
if (!page)
return -ENOMEM;
@ -2520,7 +2627,12 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
/* 52bit PA is supported only when PageSize=64K */
if (psz != SZ_64K) {
pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
free_pages((unsigned long)base, order);
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
its_free_shared_pages(base, order);
else
#endif
free_pages((unsigned long)base, order);
return -ENXIO;
}
@ -2574,7 +2686,12 @@ retry_baser:
pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
&its->phys_base, its_base_type_string[type],
val, tmp);
free_pages((unsigned long)base, order);
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
its_free_shared_pages(base, order);
else
#endif
free_pages((unsigned long)base, order);
return -ENXIO;
}
@ -2713,8 +2830,14 @@ static void its_free_tables(struct its_node *its)
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
if (its->tables[i].base) {
free_pages((unsigned long)its->tables[i].base,
its->tables[i].order);
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
its_free_shared_pages(its->tables[i].base,
its->tables[i].order);
else
#endif
free_pages((unsigned long)its->tables[i].base,
its->tables[i].order);
its->tables[i].base = NULL;
}
}
@ -2977,7 +3100,13 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
/* Allocate memory for 2nd level table */
if (!table[idx]) {
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
page = its_alloc_shared_pages(GFP_KERNEL | __GFP_ZERO,
get_order(psz));
else
#endif
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
if (!page)
return false;
@ -3096,7 +3225,13 @@ static int allocate_vpe_l1_table(void)
pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
np, npg, psz, epp, esz);
page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
page = its_alloc_shared_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(np * PAGE_SIZE));
else
#endif
page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
if (!page)
return -ENOMEM;
@ -3140,8 +3275,14 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
struct page *pend_page;
pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
get_order(LPI_PENDBASE_SZ));
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
pend_page = its_alloc_shared_pages(gfp_flags | __GFP_ZERO,
get_order(LPI_PENDBASE_SZ));
else
#endif
pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
get_order(LPI_PENDBASE_SZ));
if (!pend_page)
return NULL;
@ -3153,7 +3294,13 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
static void its_free_pending_table(struct page *pt)
{
free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
its_free_shared_pages(page_address(pt),
get_order(LPI_PENDBASE_SZ));
else
#endif
free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
}
/*
@ -3483,8 +3630,15 @@ static bool its_alloc_table_entry(struct its_node *its,
/* Allocate memory for 2nd level table */
if (!table[idx]) {
page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
get_order(baser->psz));
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
page = its_alloc_shared_pages_node(its->numa_node,
GFP_KERNEL | __GFP_ZERO,
get_order(baser->psz));
else
#endif
page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
get_order(baser->psz));
if (!page)
return false;
@ -3587,7 +3741,12 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
nr_ites = max(2, nvecs);
sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
itt = its_alloc_shared_page_address(dev, its, sz);
else
#endif
itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
if (alloc_lpis) {
lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
@ -3601,7 +3760,12 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
kfree(dev);
kfree(itt);
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
its_free_shared_pages(itt, get_order(sz));
else
#endif
kfree(itt);
kfree(lpi_map);
kfree(col_map);
return NULL;
@ -3638,7 +3802,12 @@ static void its_free_device(struct its_device *its_dev)
list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
kfree(its_dev->event_map.col_map);
kfree(its_dev->itt);
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
its_free_shared_pages(its_dev->itt, get_its_device_order(its_dev));
else
#endif
kfree(its_dev->itt);
#ifdef CONFIG_VIRT_PLAT_DEV
if (its_dev->is_vdev) {
@ -5289,8 +5458,15 @@ static int __init its_probe_one(struct resource *res,
its->numa_node = numa_node;
page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
get_order(ITS_CMD_QUEUE_SZ));
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
page = its_alloc_shared_pages_node(its->numa_node,
GFP_KERNEL | __GFP_ZERO,
get_order(ITS_CMD_QUEUE_SZ));
else
#endif
page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
get_order(ITS_CMD_QUEUE_SZ));
if (!page) {
err = -ENOMEM;
goto out_unmap_sgir;
@ -5356,7 +5532,12 @@ static int __init its_probe_one(struct resource *res,
out_free_tables:
its_free_tables(its);
out_free_cmd:
free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world())
its_free_shared_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
else
#endif
free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
if (its->sgir_base)
iounmap(its->sgir_base);
@ -5674,6 +5855,12 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
bool has_v4_1 = false;
int err;
#ifdef CONFIG_CVM_GUEST
if (is_cvm_world()) {
device_initialize(&cvm_alloc_device);
raw_spin_lock_init(&cvm_its_lock);
}
#endif
gic_rdists = rdists;
its_parent = parent_domain;

drivers/perf/arm_pmu.c

@ -766,6 +766,23 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
#ifdef CONFIG_CVM_HOST
void arm_pmu_set_phys_irq(bool enable)
{
int cpu = get_cpu();
struct arm_pmu *pmu = per_cpu(cpu_armpmu, cpu);
int irq;
irq = armpmu_get_cpu_irq(pmu, cpu);
if (irq && !enable)
per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
else if (irq && enable)
per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
put_cpu();
}
#endif
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{

include/kvm/arm_arch_timer.h

@ -77,8 +77,6 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
bool kvm_timer_is_pending(struct kvm_vcpu *vcpu);
u64 kvm_phys_timer_read(void);
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
@ -107,4 +105,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);
#ifdef CONFIG_CVM_HOST
/* Needed for S-EL2 */
void kvm_cvm_timers_update(struct kvm_vcpu *vcpu);
#endif
#endif

include/linux/kvm_host.h

@ -326,6 +326,29 @@ struct kvm_vcpu {
struct kvm_vcpu_arch arch;
};
#ifdef CONFIG_CVM_HOST
#define KVM_TYPE_CVM_BIT 8
#define CVM_MAX_HALT_POLL_NS 100000
DECLARE_STATIC_KEY_FALSE(kvm_cvm_is_available);
static __always_inline bool vcpu_is_tec(struct kvm_vcpu *vcpu)
{
if (static_branch_unlikely(&kvm_cvm_is_available)) {
struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
return tec && tec->tec_run;
}
return false;
}
static inline bool kvm_arm_cvm_type(unsigned long type)
{
return type & (1UL << KVM_TYPE_CVM_BIT);
}
#endif
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
/*

include/linux/perf/arm_pmu.h

@ -163,6 +163,9 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif
#ifdef CONFIG_CVM_HOST
void arm_pmu_set_phys_irq(bool enable);
#endif
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);

include/linux/swiotlb.h

@ -155,4 +155,21 @@ static inline void swiotlb_adjust_size(unsigned long size)
extern void swiotlb_print_info(void);
extern void swiotlb_set_max_segment(unsigned int);
#ifdef CONFIG_DMA_RESTRICTED_POOL
struct page *swiotlb_alloc(struct device *dev, size_t size);
bool swiotlb_free(struct device *dev, struct page *page, size_t size);
#else
static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
{
return NULL;
}
static inline bool swiotlb_free(struct device *dev, struct page *page,
size_t size)
{
return false;
}
#endif /* CONFIG_DMA_RESTRICTED_POOL */
#endif /* __LINUX_SWIOTLB_H */
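
A small usage sketch of the two helpers declared above, assuming the device has been set up so the restricted swiotlb pool applies; the function name is hypothetical, and the ITS changes elsewhere in this series follow the same pattern.

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/swiotlb.h>

/* Illustrative only: grab one page from the restricted pool and release it. */
static int restricted_dma_demo(struct device *dev)
{
	struct page *page = swiotlb_alloc(dev, PAGE_SIZE);

	if (!page)
		return -ENOMEM;

	/* The page sits inside the shared (decrypted) swiotlb pool. */
	memset(page_address(page), 0, PAGE_SIZE);

	swiotlb_free(dev, page, PAGE_SIZE);
	return 0;
}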

include/uapi/linux/kvm.h

@ -1378,6 +1378,35 @@ struct id_registers {
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
#define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data)
#define KVM_CAP_ARM_TMM 300 /* FIXME: Large number to prevent conflicts */
#define MAX_NUMA_NODE 8
#define MAX_CPU_BIT_MAP 4
#define MAX_NUMA_BIT_MAP 2
struct kvm_numa_node {
__u64 numa_id;
__u64 ipa_start;
__u64 ipa_size;
__u64 host_numa_nodes[MAX_NUMA_BIT_MAP];
__u64 cpu_id[MAX_CPU_BIT_MAP];
};
struct kvm_numa_info {
__u64 numa_cnt;
struct kvm_numa_node numa_nodes[MAX_NUMA_NODE];
};
struct kvm_user_data {
__u64 loader_start;
__u64 image_end;
__u64 initrd_start;
__u64 dtb_end;
__u64 ram_size;
struct kvm_numa_info numa_info;
};
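
To make the layout rules concrete, below is a hypothetical single-node initializer consistent with the checks in kvm_load_user_data() in cvm.c. It is not part of the uapi hunk: all addresses and bitmaps are made up, and host_numa_nodes[] and cpu_id[] appear to be used as bitmaps of host NUMA nodes and vcpu ids respectively.

/* Illustrative only. */
struct kvm_user_data data = {
	.loader_start = 0x40000000,	/* inside node 0's IPA window        */
	.image_end    = 0x42000000,	/* > loader_start                    */
	.initrd_start = 0x48000000,	/* >= image_end                      */
	.dtb_end      = 0x49000000,	/* >= initrd_start                   */
	.ram_size     = 0x80000000,	/* == sum of all ipa_size fields     */
	.numa_info = {
		.numa_cnt = 1,
		.numa_nodes = {
			{
				.numa_id         = 0,
				.ipa_start       = 0x40000000,
				.ipa_size        = 0x80000000,
				.host_numa_nodes = { 0x1 },	/* host node 0 */
				.cpu_id          = { 0xf },	/* vcpus 0-3   */
			},
		},
	},
};

/* Loaded with: ioctl(vm_fd, KVM_LOAD_USER_DATA, &data); */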
/* enable ucontrol for s390 */
struct kvm_s390_ucas_mapping {
__u64 user_addr;

kernel/dma/Kconfig

@ -84,6 +84,16 @@ config SWIOTLB
bool
select NEED_DMA_MAP_STATE
config DMA_RESTRICTED_POOL
bool "DMA Restricted Pool"
depends on OF && OF_RESERVED_MEM && SWIOTLB
help
This enables support for restricted DMA pools which provide a level of
DMA memory protection on systems with limited hardware protection
capabilities, such as those lacking an IOMMU.
If unsure, say "n".
#
# Should be selected if we can mmap non-coherent mappings to userspace.
# The only thing that is really required is a way to set an uncached bit

kernel/dma/direct.c

@ -22,6 +22,13 @@
*/
unsigned int zone_dma_bits __ro_after_init = 24;
#ifndef is_swiotlb_for_alloc
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
return false;
}
#endif
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
phys_addr_t phys)
{
@ -94,6 +101,15 @@ static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
return ret;
}
static void __dma_direct_free_pages(struct device *dev, struct page *page,
size_t size)
{
if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
swiotlb_free(dev, page, size))
return;
dma_free_contiguous(dev, page, size);
}
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
gfp_t gfp)
{
@ -105,6 +121,16 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
&phys_limit);
if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
is_swiotlb_for_alloc(dev)) {
page = swiotlb_alloc(dev, size);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
__dma_direct_free_pages(dev, page, size);
return NULL;
}
return page;
}
page = dma_alloc_contiguous(dev, size, gfp);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
dma_free_contiguous(dev, page, size);
@ -182,7 +208,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
gfp |= __GFP_NOWARN;
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
!force_dma_unencrypted(dev)) {
!force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
if (!page)
return NULL;
@ -195,18 +221,23 @@ void *dma_direct_alloc(struct device *dev, size_t size,
}
if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!dev_is_dma_coherent(dev))
!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
!is_swiotlb_for_alloc(dev))
return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
/*
* Remapping or decrypting memory may block. If either is required and
* we can't block, allocate the memory from the atomic pools.
* If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one must
* set up another device coherent pool by shared-dma-pool and use
* dma_alloc_from_dev_coherent instead.
*/
if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
!gfpflags_allow_blocking(gfp) &&
(force_dma_unencrypted(dev) ||
(IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
(IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!dev_is_dma_coherent(dev))) &&
!is_swiotlb_for_alloc(dev))
return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
/* we always manually zero the memory once we are done */
@ -264,7 +295,7 @@ out_encrypt_pages:
if (dma_set_encrypted(dev, page_address(page), size))
return NULL;
out_free_pages:
dma_free_contiguous(dev, page, size);
__dma_direct_free_pages(dev, page, size);
return NULL;
}
@ -272,15 +303,15 @@ void dma_direct_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
!force_dma_unencrypted(dev)) {
!force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
dma_free_contiguous(dev, cpu_addr, size);
return;
}
if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!dev_is_dma_coherent(dev)) {
!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
!is_swiotlb_for_alloc(dev)) {
arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
return;
}
@ -298,7 +329,7 @@ void dma_direct_free(struct device *dev, size_t size,
else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
arch_dma_clear_uncached(cpu_addr, size);
dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
@ -308,7 +339,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
void *ret;
if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
!is_swiotlb_for_alloc(dev))
return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
page = __dma_direct_alloc_pages(dev, size, gfp);
@@ -332,7 +364,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
return page;
out_free_pages:
dma_free_contiguous(dev, page, size);
__dma_direct_free_pages(dev, page, size);
return NULL;
}
@@ -349,7 +381,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
if (dma_set_encrypted(dev, vaddr, size))
return;
dma_free_contiguous(dev, page, size);
__dma_direct_free_pages(dev, page, size);
}
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \

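Note for reviewers (not part of the patch): the is_swiotlb_for_alloc() branches added above only fire for a device bound to a restricted DMA pool. A minimal, illustrative driver-side sketch, assuming the usual restricted-dma-pool reserved-memory binding (helper names are hypothetical):

/*
 * Illustrative sketch only, not part of the patch. Assumes the device's
 * DT node references a reserved-memory region with
 * compatible = "restricted-dma-pool" through its memory-region property.
 * With CONFIG_DMA_RESTRICTED_POOL enabled, is_swiotlb_for_alloc(dev) is
 * then true and the allocation below is served by swiotlb_alloc()
 * instead of dma_alloc_contiguous().
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static void *cvm_shared_buf_alloc(struct device *dev, size_t size,
				  dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

static void cvm_shared_buf_free(struct device *dev, size_t size,
				void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}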

@@ -849,3 +849,95 @@ static int __init swiotlb_create_debugfs(void)
late_initcall(swiotlb_create_debugfs);
#endif
#ifdef CONFIG_DMA_RESTRICTED_POOL
struct page *swiotlb_alloc(struct device *dev, size_t size)
{
phys_addr_t tlb_addr;
int index;
struct io_tlb_mem *mem = io_tlb_default_mem;
index = swiotlb_find_slots(dev, 0, size);
if (index == -1)
return NULL;
tlb_addr = slot_addr(mem->start, index);
return pfn_to_page(PFN_DOWN(tlb_addr));
}
static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr,
size_t alloc_size)
{
unsigned long flags;
unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
int i, count, nslots = nr_slots(alloc_size + offset);
struct io_tlb_mem *mem = io_tlb_default_mem;
int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
int aindex = index / mem->area_nslabs;
struct io_tlb_area *area = &mem->areas[aindex];
/*
* Return the buffer to the free list by setting the corresponding
* entries to indicate the number of contiguous entries available.
* While returning the entries to the free list, we merge the entries
* with slots below and above the pool being returned.
*/
spin_lock_irqsave(&area->lock, flags);
if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
count = mem->slots[index + nslots].list;
else
count = 0;
/*
* Step 1: return the slots to the free list, merging the slots with
* succeeding slots
*/
for (i = index + nslots - 1; i >= index; i--) {
mem->slots[i].list = ++count;
mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
}
/*
* Step 2: merge the returned slots with the preceding slots, if
* available (non-zero)
*/
for (i = index - 1;
io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
i--)
mem->slots[i].list = ++count;
area->used -= nslots;
spin_unlock_irqrestore(&area->lock, flags);
}
bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
phys_addr_t tlb_addr = page_to_phys(page);
if (!is_swiotlb_buffer(tlb_addr))
return false;
swiotlb_release_slots(dev, tlb_addr, size);
return true;
}
#ifdef CONFIG_CVM_GUEST
void __init swiotlb_cvm_update_mem_attributes(void)
{
void *vaddr;
unsigned long bytes;
struct io_tlb_mem *mem = io_tlb_default_mem;
if (!is_cvm_world() || !mem->start)
return;
vaddr = phys_to_virt(mem->start);
bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
set_cvm_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
memset(vaddr, 0, bytes);
}
#endif
#endif /* CONFIG_DMA_RESTRICTED_POOL */
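
Usage note (illustrative only, not part of the patch): swiotlb_free() reports via its return value whether the page actually came from the swiotlb pool, so a caller can fall back to its normal free path. A hypothetical caller pairing the two helpers:

/*
 * Illustrative sketch, not part of the patch. Pairs swiotlb_alloc() with
 * swiotlb_free(); when the pool has no free slots the sketch falls back
 * to the page allocator, mirroring how the direct-DMA allocator treats
 * these helpers.
 */
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/swiotlb.h>

static struct page *example_get_shared_page(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (!page)					/* no free slots in the pool */
		page = alloc_pages(GFP_KERNEL, get_order(size));
	return page;
}

static void example_put_shared_page(struct device *dev, struct page *page,
				    size_t size)
{
	if (!swiotlb_free(dev, page, size))		/* not a swiotlb buffer */
		__free_pages(page, get_order(size));
}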


@@ -14,3 +14,4 @@ ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o
ima-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o
ima-$(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) += ima_asymmetric_keys.o
ima-$(CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS) += ima_queue_keys.o
ima-$(CONFIG_CVM_GUEST) += ima_cvm.o


@@ -0,0 +1,77 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
*/
#include <asm/cvm_smc.h>
#include <asm/cvm_guest.h>
#include "ima_cvm.h"
static bool ima_tsi_cvm;
bool ima_cvm_available(void)
{
return ima_tsi_cvm;
}
int __init ima_cvm_init(void)
{
int rc = -ENODEV;
if (is_cvm_world() && tsi_get_version() != SMCCC_RET_NOT_SUPPORTED) {
ima_tsi_cvm = true;
rc = 0;
}
return rc;
}
int ima_calc_cvm_boot_aggregate(struct ima_digest_data *hash)
{
unsigned long result;
int hash_len;
struct cvm_config cfg = { 0 };
struct cvm_measurement cm = { 0 };
result = tsi_get_cvm_config(&cfg);
if (result != TSI_SUCCESS) {
pr_err("Error reading cvm config for boot aggregate\n");
return -EFAULT;
}
/* 0: SHA256, 1: SHA512 */
hash->algo = cfg.algorithm ? HASH_ALGO_SHA512 : HASH_ALGO_SHA256;
hash_len = hash_digest_size[hash->algo];
/* Read the measurement result of RIM as the boot aggregate */
cm.index = RIM_MEASUREMENT_SLOT;
result = tsi_measurement_read(&cm);
if (result != TSI_SUCCESS) {
pr_err("Error reading cvm measurement 0 for boot aggregate\n");
return -EFAULT;
}
memcpy(hash->digest, cm.value, hash_len);
return 0;
}
int ima_cvm_extend(struct tpm_digest *digests_arg)
{
struct cvm_measurement_extend cme;
if (!ima_tsi_cvm)
return 0;
/* Use index 1 as CVM IMA slot */
cme.index = 1;
cme.size = hash_digest_size[ima_hash_algo];
if (digests_arg)
memcpy(cme.value, digests_arg[ima_hash_algo_idx].digest,
cme.size);
else
memset(cme.value, 0xff, cme.size);
return tsi_measurement_extend(&cme) == TSI_SUCCESS ? 0 : -EFAULT;
}
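
For illustration only (not part of the patch): the slot extended by ima_cvm_extend() can be read back with the same tsi_measurement_read() call that ima_calc_cvm_boot_aggregate() uses for the RIM slot. The helper below is hypothetical and reuses the cvm_measurement layout from this file.

/*
 * Hypothetical helper, illustrative only: read back the CVM IMA
 * measurement slot (index 1) targeted by ima_cvm_extend() above.
 */
static int example_read_cvm_ima_slot(u8 *digest, size_t len)
{
	struct cvm_measurement cm = { .index = 1 };	/* CVM IMA slot used above */

	if (!ima_cvm_available())
		return -ENODEV;
	if (tsi_measurement_read(&cm) != TSI_SUCCESS)
		return -EFAULT;
	if (len > sizeof(cm.value))
		len = sizeof(cm.value);
	memcpy(digest, cm.value, len);
	return 0;
}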


@@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
*/
#ifndef __LINUX_IMA_CVM_H
#define __LINUX_IMA_CVM_H
#include "ima.h"
#ifdef CONFIG_CVM_GUEST
int __init ima_cvm_init(void);
bool ima_cvm_available(void);
int ima_cvm_extend(struct tpm_digest *digests_arg);
int ima_calc_cvm_boot_aggregate(struct ima_digest_data *hash);
#else
static inline int __init ima_cvm_init(void)
{
return -ENODEV;
}
static inline bool ima_cvm_available(void)
{
return false;
}
static inline int ima_cvm_extend(struct tpm_digest *digests_arg)
{
return -ENODEV;
}
static inline int ima_calc_cvm_boot_aggregate(struct ima_digest_data *hash)
{
return -ENODEV;
}
#endif
#endif
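
A short caller sketch (hypothetical, not part of the patch) showing the intended guard pattern for this API, matching how the IMA core uses it further below:

/* Hypothetical caller sketch: probe for a confidential VM before extending. */
static int example_extend_if_cvm(struct tpm_digest *digests)
{
	if (!ima_cvm_available())	/* not running as a cVM guest */
		return 0;
	return ima_cvm_extend(digests);
}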


@@ -17,6 +17,7 @@
#include <linux/err.h>
#include "ima.h"
#include "ima_cvm.h"
/* name for boot aggregate entry */
const char boot_aggregate_name[] = "boot_aggregate";
@@ -58,6 +59,16 @@ static int __init ima_add_boot_aggregate(void)
iint->ima_hash->algo = ima_hash_algo;
iint->ima_hash->length = hash_digest_size[ima_hash_algo];
#ifdef CONFIG_CVM_GUEST
if (ima_cvm_available()) {
result = ima_calc_cvm_boot_aggregate(&hash.hdr);
if (result < 0) {
audit_cause = "hashing_error";
goto err_out;
}
}
#endif
/*
* With TPM 2.0 hash agility, TPM chips could support multiple TPM
* PCR banks, allowing firmware to configure and enable different
@@ -114,7 +125,15 @@ int __init ima_init(void)
{
int rc;
#ifdef CONFIG_CVM_GUEST
rc = ima_cvm_init();
if (rc) {
pr_info("No CVM found, activating CVM-bypass!\n");
ima_tpm_chip = tpm_default_chip();
}
#else
ima_tpm_chip = tpm_default_chip();
#endif
if (!ima_tpm_chip)
pr_info("No TPM chip found, activating TPM-bypass!\n");


@@ -18,6 +18,7 @@
#include <linux/rculist.h>
#include <linux/slab.h>
#include "ima.h"
#include "ima_cvm.h"
#define AUDIT_CAUSE_LEN_MAX 32
@@ -186,6 +187,16 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
if (violation) /* invalidate pcr */
digests_arg = digests;
#ifdef CONFIG_CVM_GUEST
tpmresult = ima_cvm_extend(digests_arg);
if (tpmresult != 0) {
snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TSI_error(%d)",
tpmresult);
audit_cause = tpm_audit_cause;
audit_info = 0;
}
#endif
tpmresult = ima_pcr_extend(digests_arg, entry->pcr);
if (tpmresult != 0) {
snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",


@@ -812,7 +812,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
goto out_err_no_arch_destroy_vm;
}
kvm->max_halt_poll_ns = halt_poll_ns;
#ifdef CONFIG_CVM_HOST
if (kvm_arm_cvm_type(type))
kvm->max_halt_poll_ns = CVM_MAX_HALT_POLL_NS;
else
#endif
kvm->max_halt_poll_ns = halt_poll_ns;
r = kvm_arch_init_vm(kvm, type);
if (r)