anolis: KVM: ARM: Support KVM being compiled as a kernel module
ANBZ: #4924

This patch adds support for KVM ARM64 to be compiled as a kernel module.
It makes CONFIG_KVM_ARM_HOST a tristate option and adds a new config
option, CONFIG_KVM_ARM_HOST_VHE_ONLY, to ensure that the kernel-module
build is only supported on VHE systems.

[ Merged commit ("ARM64: KVM: Fix kvm running on non-vhe platform.") ]

Signed-off-by: Shannon Zhao <shannon.zhao@linux.alibaba.com>
Signed-off-by: Xiang Zheng <xiang.zheng@linux.alibaba.com>
Reviewed-by: luanshi <zhangliguang@linux.alibaba.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Guanjun <guanjun@linux.alibaba.com>
Signed-off-by: xuejun-xj <jiyunxue@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/1628
parent 3efc4fdeac
commit d2310fdf80
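A note on a pattern that recurs below: the #ifdef CONFIG_KVM -> #if IS_ENABLED(CONFIG_KVM) conversions are needed because, once KVM is tristate, building it as a module defines CONFIG_KVM_MODULE instead of CONFIG_KVM, so a plain #ifdef test goes false. IS_ENABLED() from include/linux/kconfig.h covers both cases. A minimal sketch (CONFIG_FOO is a hypothetical symbol):

	#include <linux/kconfig.h>

	/* CONFIG_FOO=y defines CONFIG_FOO; CONFIG_FOO=m defines CONFIG_FOO_MODULE */
	#if IS_ENABLED(CONFIG_FOO)	/* true for =y and for =m */
	int foo_common(void);
	#endif

	#if IS_MODULE(CONFIG_FOO)	/* true only for =m */
	int foo_module_only(void);
	#endif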
@@ -59,6 +59,11 @@
 #define ICACHEF_ALIASING	0
 #define ICACHEF_VPIPT		1

+#if defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
+int icache_is_aliasing(void);
+int icache_is_vpipt(void);
+#else
 extern unsigned long __icache_flags;

 /*
@@ -74,6 +79,7 @@ static __always_inline int icache_is_vpipt(void)
 {
 	return test_bit(ICACHEF_VPIPT, &__icache_flags);
 }
+#endif

 static inline u32 cache_type_cwg(void)
 {
@@ -98,11 +98,15 @@ static inline unsigned int __bit_to_vq(unsigned int bit)
 	return SVE_VQ_MAX - bit;
 }

+#if defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
+bool sve_vq_available(unsigned int vq);
+#else
 /* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
 static inline bool sve_vq_available(unsigned int vq)
 {
 	return test_bit(__vq_to_bit(vq), sve_vq_map);
 }
+#endif

 #ifdef CONFIG_ARM64_SVE
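The reshuffle above follows another pattern used throughout the patch: a static inline in a header is compiled into its callers, so a module calling sve_vq_available() would reference sve_vq_map directly, and that map is not exported. For the modular build the body moves out of line behind a single exported wrapper (the definition appears in a later hunk), keeping the map itself private. A minimal sketch of the idea, with hypothetical names:

	#include <linux/bitops.h>
	#include <linux/export.h>
	#include <linux/types.h>

	static unsigned long private_map[1];	/* stays private to the kernel image */

	bool wrapper_available(unsigned int bit)	/* out of line, exported */
	{
		return test_bit(bit, private_map);
	}
	EXPORT_SYMBOL(wrapper_available);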
@@ -136,6 +136,13 @@ extern void *__vhe_undefined_symbol;
  * - Don't let the nVHE hypervisor have access to this, as it will
  *   pick the *wrong* symbol (yes, it runs at EL2...).
  */
+#if defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
+#define CHOOSE_HYP_SYM(sym)		CHOOSE_VHE_SYM(sym)
+#define this_cpu_ptr_hyp_sym(sym)	this_cpu_ptr(&sym)
+#define per_cpu_ptr_hyp_sym(sym, cpu)	per_cpu_ptr(&sym, cpu)
+#define CHOOSE_VHE_SYM(sym)	sym
+#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
+#else
 #define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode()	\
 					   ? CHOOSE_VHE_SYM(sym)	\
 					   : CHOOSE_NVHE_SYM(sym))
@@ -150,6 +157,7 @@ extern void *__vhe_undefined_symbol;

 #define CHOOSE_VHE_SYM(sym)	sym
 #define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
+#endif

 #endif
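With KVM built in, CHOOSE_HYP_SYM() must pick between the VHE and nVHE copies of a hypervisor symbol at run time via is_kernel_in_hyp_mode(). Under KVM_ARM_HOST_VHE_ONLY only the VHE flavour exists, so the choice collapses to a compile-time alias and the hyp per-cpu accessors degenerate to the ordinary this_cpu_ptr()/per_cpu_ptr(). A sketch of the two shapes, mirroring the hunk above:

	/* built in: both hyp flavours present, dispatch at run time */
	#define CHOOSE_HYP_SYM(sym) \
		(is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) : CHOOSE_NVHE_SYM(sym))

	/* VHE-only module: one flavour, resolved at compile time */
	#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)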
@@ -624,7 +624,7 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
 	return (!has_vhe() && attr->exclude_host);
 }

-#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
+#if IS_ENABLED(CONFIG_KVM) /* Avoid conflicts with core headers if CONFIG_KVM=n */
 static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 {
 	return kvm_arch_vcpu_run_map_fp(vcpu);
@@ -635,9 +635,6 @@ void kvm_clr_pmu_events(u32 clr);

 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
-#else
-static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
-static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif

 void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
@@ -63,6 +63,7 @@
  * specific registers encoded in the instructions).
  */
 .macro kern_hyp_va	reg
+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 alternative_cb kvm_update_va_mask
 	and     \reg, \reg, #1		/* mask with va_mask */
 	ror	\reg, \reg, #1		/* rotate to the first tag bit */
@@ -70,6 +71,7 @@ alternative_cb kvm_update_va_mask
 	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
 	ror	\reg, \reg, #63		/* rotate back */
 alternative_cb_end
+#endif
 .endm

 #else
@@ -86,6 +88,7 @@ void kvm_compute_layout(void);

 static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
 				    "ror %0, %0, #1\n"
 				    "add %0, %0, #0\n"
@@ -93,6 +96,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 				    "ror %0, %0, #63\n",
 				    kvm_update_va_mask)
 		     : "+r" (v));
+#endif
 	return v;
 }

@@ -208,6 +212,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 	return ret;
 }

+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
  * depending on the kernel configuration and CPU present:
@@ -254,6 +259,12 @@ static inline void *kvm_get_hyp_vector(void)

 	return vect;
 }
+#else
+static inline void *kvm_get_hyp_vector(void)
+{
+	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
+}
+#endif

 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
@@ -254,4 +254,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 	(regs)->pstate = PSR_MODE_EL1h;	\
 }

+void register_kvm_pmu_events_handler(void *set, void *clr);
+void unregister_kvm_pmu_events_handler(void);
+
 #endif
@@ -401,6 +401,7 @@ int apei_claim_sea(struct pt_regs *regs)

 	return err;
 }
+EXPORT_SYMBOL(apei_claim_sea);

 void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
 {
@@ -96,7 +96,7 @@ int main(void)
   DEFINE(CPU_BOOT_STACK,	offsetof(struct secondary_data, stack));
   DEFINE(CPU_BOOT_TASK,		offsetof(struct secondary_data, task));
   BLANK();
-#ifdef CONFIG_KVM
+#if IS_ENABLED(CONFIG_KVM)
   DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
   DEFINE(VCPU_FAULT_DISR,	offsetof(struct kvm_vcpu, arch.fault.disr_el1));
   DEFINE(VCPU_WORKAROUND_FLAGS,	offsetof(struct kvm_vcpu, arch.workaround_flags));
@@ -109,6 +109,7 @@ bool arm64_use_ng_mappings = false;
 EXPORT_SYMBOL(arm64_use_ng_mappings);

 DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
+EXPORT_SYMBOL(this_cpu_vector);

 /*
  * Flag to indicate if we have computed the system wide
@@ -43,6 +43,24 @@ static const char *icache_policy_str[] = {

 unsigned long __icache_flags;

+#if defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
+/*
+ * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
+ * permitted in the I-cache.
+ */
+int icache_is_aliasing(void)
+{
+	return test_bit(ICACHEF_ALIASING, &__icache_flags);
+}
+EXPORT_SYMBOL(icache_is_aliasing);
+
+int icache_is_vpipt(void)
+{
+	return test_bit(ICACHEF_VPIPT, &__icache_flags);
+}
+EXPORT_SYMBOL(icache_is_vpipt);
+#endif
+
 static const char *const hwcap_str[] = {
 	[KERNEL_HWCAP_FP]		= "fp",
 	[KERNEL_HWCAP_ASIMD]		= "asimd",
@@ -37,11 +37,13 @@ SYM_FUNC_START(sve_save_state)
 	sve_save 0, x1, 2
 	ret
 SYM_FUNC_END(sve_save_state)
+EXPORT_SYMBOL(sve_save_state)

 SYM_FUNC_START(sve_load_state)
 	sve_load 0, x1, x2, 3, x4
 	ret
 SYM_FUNC_END(sve_load_state)
+EXPORT_SYMBOL(sve_load_state)

 SYM_FUNC_START(sve_get_vl)
 	_sve_rdvl	0, 1
@@ -138,7 +138,9 @@ static void set_sve_default_vl(int val)

 /* Maximum supported vector length across all CPUs (initially poisoned) */
 int __ro_after_init sve_max_vl = SVE_VL_MIN;
+EXPORT_SYMBOL(sve_max_vl);
 int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
+EXPORT_SYMBOL(sve_max_virtualisable_vl);

 /*
  * Set of available vector lengths,
@@ -159,6 +161,15 @@ extern void __percpu *efi_sve_state;

 #endif /* ! CONFIG_ARM64_SVE */

+#if defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
+/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
+bool sve_vq_available(unsigned int vq)
+{
+	return test_bit(__vq_to_bit(vq), sve_vq_map);
+}
+EXPORT_SYMBOL(sve_vq_available);
+#endif
+
 DEFINE_PER_CPU(bool, fpsimd_context_busy);
 EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);

@@ -1136,6 +1147,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
 	last->sve_state = sve_state;
 	last->sve_vl = sve_vl;
 }
+EXPORT_SYMBOL(fpsimd_bind_state_to_cpu);

 /*
  * Load the userland FPSIMD state of 'current' from memory, but only if the
@@ -1245,6 +1257,7 @@ void fpsimd_save_and_flush_cpu_state(void)
 	fpsimd_flush_cpu_state();
 	__put_cpu_fpsimd_context();
 }
+EXPORT_SYMBOL(fpsimd_save_and_flush_cpu_state);

 #ifdef CONFIG_KERNEL_MODE_NEON
@@ -696,6 +696,7 @@ SYM_DATA_START(__boot_cpu_mode)
 	.long	BOOT_CPU_MODE_EL2
 	.long	BOOT_CPU_MODE_EL1
 SYM_DATA_END(__boot_cpu_mode)
+
 /*
  * The booting CPU updates the failed status @__early_cpu_boot_status,
  * with MMU turned off.
@@ -58,6 +58,8 @@ extern char hibernate_el2_vectors[];

 /* hyp-stub vectors, used to restore el2 during resume from hibernate. */
 extern char __hyp_stub_vectors[];
+/* Export it for KVM usage. */
+EXPORT_SYMBOL(__hyp_stub_vectors);

 /*
  * The logical cpu number we should resume on, initialised to a non-cpu
@@ -126,10 +128,12 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 	hdr->ttbr1_el1		= __pa_symbol(swapper_pg_dir);
 	hdr->reenter_kernel	= _cpu_resume;

+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
 	if (el2_reset_needed())
 		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
 	else
+#endif
 		hdr->__hyp_stub_vectors = 0;

 	/* Save the mpidr of the cpu we called cpu_suspend() on... */
@@ -424,11 +428,13 @@ int swsusp_arch_suspend(void)
 		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
 		dcache_clean_range(__idmap_text_start, __idmap_text_end);

+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 		/* Clean kvm setup code to PoC? */
 		if (el2_reset_needed()) {
 			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
 			dcache_clean_range(__hyp_text_start, __hyp_text_end);
 		}
+#endif

 		swsusp_mte_restore_tags();

@@ -698,6 +704,7 @@ int swsusp_arch_resume(void)
 	 *
 	 * We can skip this step if we booted at EL1, or are running with VHE.
 	 */
+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 	if (el2_reset_needed()) {
 		phys_addr_t el2_vectors = phys_hibernate_exit;  /* base */
 		el2_vectors += hibernate_el2_vectors -
@@ -705,6 +712,7 @@ int swsusp_arch_resume(void)

 		__hyp_set_vectors(el2_vectors);
 	}
+#endif

 	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
 		       resume_hdr.reenter_kernel, restore_pblist,
@@ -592,6 +592,7 @@ u32 __kprobes aarch64_insn_gen_nop(void)
 {
 	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
 }
+EXPORT_SYMBOL(aarch64_insn_gen_nop);

 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
 				enum aarch64_insn_branch_type type)
@@ -1697,3 +1698,4 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
 }
+EXPORT_SYMBOL(aarch64_insn_gen_extr);
@@ -8,6 +8,7 @@
 #include <linux/percpu.h>

 DEFINE_STATIC_KEY_FALSE(arm64_mpam_has_hcr);
+EXPORT_SYMBOL(arm64_mpam_has_hcr);
 DEFINE_PER_CPU(u64, arm64_mpam_default);
 DEFINE_PER_CPU(u64, arm64_mpam_current);
@@ -615,12 +615,30 @@ static inline void armv8pmu_enable_counter(u32 mask)
 	write_sysreg(mask, pmcntenset_el0);
 }

+static void (*kvm_set_pmu_events_ptr)(u32 set, struct perf_event_attr *attr);
+static void (*kvm_clr_pmu_events_ptr)(u32 clr);
+
+void register_kvm_pmu_events_handler(void *set, void *clr)
+{
+	kvm_set_pmu_events_ptr = set;
+	kvm_clr_pmu_events_ptr = clr;
+}
+EXPORT_SYMBOL(register_kvm_pmu_events_handler);
+
+void unregister_kvm_pmu_events_handler(void)
+{
+	kvm_set_pmu_events_ptr = NULL;
+	kvm_clr_pmu_events_ptr = NULL;
+}
+EXPORT_SYMBOL(unregister_kvm_pmu_events_handler);
+
 static inline void armv8pmu_enable_event_counter(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
 	u32 mask = armv8pmu_event_cnten_mask(event);

-	kvm_set_pmu_events(mask, attr);
+	if (kvm_set_pmu_events_ptr)
+		(*kvm_set_pmu_events_ptr)(mask, attr);

 	/* We rely on the hypervisor switch code to enable guest counters */
 	if (!kvm_pmu_counter_deferred(attr))
@@ -642,7 +660,8 @@ static inline void armv8pmu_disable_event_counter(struct perf_event *event)
 	struct perf_event_attr *attr = &event->attr;
 	u32 mask = armv8pmu_event_cnten_mask(event);

-	kvm_clr_pmu_events(mask);
+	if (kvm_clr_pmu_events_ptr)
+		(*kvm_clr_pmu_events_ptr)(mask);

 	/* We rely on the hypervisor switch code to disable guest counters */
 	if (!kvm_pmu_counter_deferred(attr))
@@ -934,7 +953,8 @@ static void armv8pmu_reset(void *info)
 	armv8pmu_disable_intens(U32_MAX);

 	/* Clear the counters we flip at guest entry/exit */
-	kvm_clr_pmu_events(U32_MAX);
+	if (kvm_clr_pmu_events_ptr)
+		kvm_clr_pmu_events_ptr(U32_MAX);

 	/*
 	 * Initialize & Reset PMNC. Request overflow interrupt for
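The function pointers replace the direct kvm_set_pmu_events()/kvm_clr_pmu_events() calls because this PMU driver stays built in while those functions may now live in the KVM module; built-in code cannot link against module symbols, so the dependency is inverted and KVM registers its handlers at init time (see the register_kvm_pmu_events_handler() call in a later hunk, with the matching unregister on exit). A minimal sketch of the pattern, with hypothetical names:

	/* built-in side: tolerate the handler being absent or gone */
	static void (*pmu_handler)(u32 mask);

	void register_pmu_handler(void *fn)
	{
		pmu_handler = fn;
	}
	EXPORT_SYMBOL(register_pmu_handler);

	static void hot_path(u32 mask)
	{
		if (pmu_handler)	/* module not loaded: skip */
			pmu_handler(mask);
	}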
@@ -430,6 +430,7 @@ int __init arch_populate_kprobe_blacklist(void)
 		return ret;
 	ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
 					(unsigned long)__idmap_text_end);
+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 	if (ret)
 		return ret;
 	ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
@@ -438,6 +439,7 @@ int __init arch_populate_kprobe_blacklist(void)
 		return ret;
 	ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
 					(unsigned long)__hyp_idmap_text_end);
+#endif
 	return ret;
 }
@@ -217,6 +217,7 @@ enum mitigation_state arm64_get_spectre_v2_state(void)
 {
 	return spectre_v2_state;
 }
+EXPORT_SYMBOL(arm64_get_spectre_v2_state);

 #ifdef CONFIG_KVM
 #include <asm/cacheflush.h>
@@ -478,6 +479,7 @@ enum mitigation_state arm64_get_spectre_v4_state(void)
 {
 	return spectre_v4_state;
 }
+EXPORT_SYMBOL(arm64_get_spectre_v4_state);

 static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
 {
@@ -848,6 +850,7 @@ enum mitigation_state arm64_get_spectre_bhb_state(void)
 {
 	return spectre_bhb_state;
 }
+EXPORT_SYMBOL(arm64_get_spectre_bhb_state);

 /*
  * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
@@ -54,6 +54,13 @@
 static int num_standard_resources;
 static struct resource *standard_resources;

+/* Export below symbols for KVM usage. */
+EXPORT_SYMBOL(__boot_cpu_mode);
+extern char vectors[];
+EXPORT_SYMBOL(vectors);
+EXPORT_SYMBOL(__flush_dcache_area);
+EXPORT_SYMBOL(invalidate_icache_range);
+
 phys_addr_t __fdt_pointer __initdata;

 /*
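Much of the arch-side churn in this patch is plain EXPORT_SYMBOL() additions like the block above: a module's relocations are resolved only against exported symbols, so every kernel-image symbol the modular KVM references has to be exported, or loading it fails with "Unknown symbol". A toy illustration (names hypothetical):

	/* built into the kernel image */
	int provider(void)
	{
		return 42;
	}
	EXPORT_SYMBOL(provider);	/* without this, a module calling
					 * provider() refuses to load */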
@@ -437,8 +437,11 @@ static void __init hyp_mode_check(void)
 			   "CPU: CPUs started in inconsistent modes");
 	else
 		pr_info("CPU: All CPU(s) started at EL1\n");
+
+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 	if (IS_ENABLED(CONFIG_KVM))
 		kvm_compute_layout();
+#endif
 }

 void __init smp_cpus_done(unsigned int max_cpus)
@@ -1068,6 +1071,7 @@ void smp_send_reschedule(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
+EXPORT_SYMBOL(smp_send_reschedule);

 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
@@ -744,6 +744,7 @@ const char *esr_get_class_string(u32 esr)
 {
 	return esr_class_str[ESR_ELx_EC(esr)];
 }
+EXPORT_SYMBOL(esr_get_class_string);

 /*
  * bad_mode handles the impossible case in the exception vector. This is always
@@ -864,6 +865,7 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
 		arm64_serror_panic(regs, esr);
 	}
 }
+EXPORT_SYMBOL(arm64_is_fatal_ras_serror);

 asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
 {
@@ -17,8 +17,8 @@ menuconfig VIRTUALIZATION

 if VIRTUALIZATION

-menuconfig KVM
-	bool "Kernel-based Virtual Machine (KVM) support"
+config KVM
+	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on OF
 	# for TASKSTATS/TASK_DELAY_ACCT:
 	depends on NET && MULTIUSER
@@ -41,15 +41,20 @@ menuconfig KVM
 	select HAVE_KVM_VCPU_RUN_PID_CHANGE
 	select TASKSTATS
 	select TASK_DELAY_ACCT
+	select KVM_ARM_HOST_VHE_ONLY if KVM=m
 	help
 	  Support hosting virtualized guest machines.

 	  If unsure, say N.

 if KVM

 source "virt/kvm/Kconfig"

+config KVM_ARM_HOST_VHE_ONLY
+	bool
+	depends on KVM=m
+	default n
+
 config KVM_ARM_PMU
 	bool "Virtual Performance Monitoring Unit (PMU) support"
 	depends on HW_PERF_EVENTS
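Note that the new symbol is deliberately not user-visible: it has no prompt and is selected automatically whenever KVM is modular. A configuration sketch of the two supported shapes:

	# built in: VHE and nVHE hosts both supported
	CONFIG_VIRTUALIZATION=y
	CONFIG_KVM=y

	# modular: VHE-only (KVM_ARM_HOST_VHE_ONLY gets selected)
	CONFIG_VIRTUALIZATION=y
	CONFIG_KVM=m
	CONFIG_KVM_ARM_HOST_VHE_ONLY=y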
@@ -4,11 +4,12 @@
 #

 ccflags-y += -I $(srctree)/$(src)
+ccflags-y += -I $(srctree)/$(src)/hyp/include

 KVM=../../../virt/kvm
+IRQCHIP=../../../drivers/irqchip

 obj-$(CONFIG_KVM) += kvm.o
-obj-$(CONFIG_KVM) += hyp/

 kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
 	 $(KVM)/vfio.o $(KVM)/irqchip.o \
@@ -25,3 +26,24 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
 	 vgic/vgic-its.o vgic/vgic-debug.o

 kvm-$(CONFIG_KVM_ARM_PMU)  += pmu-emul.o
+
+ifndef CONFIG_KVM_ARM_HOST_VHE_ONLY
+obj-$(CONFIG_KVM) += hyp/
+else
+kvm-y += $(IRQCHIP)/irq-gic-v4.o
+kvm-y += $(addprefix hyp/, fpsimd.o hyp-entry.o entry.o vgic-v2-cpuif-proxy.o aarch32.o pgtable.o smccc_wa.o vgic-v3-sr.o) $(addprefix hyp/vhe/, debug-sr.o sysreg-sr.o timer-sr.o tlb.o)
+
+extra-y := hyp.lds switch.tmp.o
+
+$(obj)/hyp.lds: $(src)/hyp/hyp.lds.S FORCE
+	$(call if_changed_dep,cpp_lds_S)
+
+$(obj)/switch.tmp.o: $(src)/hyp/vhe/switch.c FORCE
+	$(call if_changed_rule,cc_o_c)
+
+LDFLAGS_switch.o := -r -T
+$(obj)/switch.o: $(obj)/hyp.lds $(obj)/switch.tmp.o FORCE
+	$(call if_changed,ld)
+
+kvm-y += switch.o
+endif
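When KVM is modular the hyp/ objects can no longer be linked into the kernel image, so they are folded into kvm-y directly; the VHE world switch additionally goes through a partial link against hyp.lds so that its __kvm_ex_table entries end up grouped in their own section of the module. The two-step switch.o rule above is roughly equivalent to running (sketch):

	ld -r -T hyp.lds -o switch.o switch.tmp.o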
@@ -1078,10 +1078,12 @@ void kvm_timer_hyp_uninit(void)

 	cpuhp_remove_state(CPUHP_AP_KVM_ARM_TIMER_STARTING);
 	if (info->physical_irq > 0) {
-		on_each_cpu((smp_call_func_t)disable_percpu_irq, (void *)host_ptimer_irq, 1);
+		on_each_cpu((smp_call_func_t)disable_percpu_irq,
+			    (void *)(long)host_ptimer_irq, 1);
 		free_percpu_irq(host_ptimer_irq, kvm_get_running_vcpus());
 	}
-	on_each_cpu((smp_call_func_t)disable_percpu_irq, (void *)host_vtimer_irq, 1);
+	on_each_cpu((smp_call_func_t)disable_percpu_irq,
+		    (void *)(long)host_vtimer_irq, 1);
 	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
 }
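The intermediate (long) cast in the two calls above is a warning fix rather than a behaviour change: on arm64, converting a 32-bit int straight to a 64-bit pointer trips -Wint-to-pointer-cast, while widening to long first does not. Sketch:

	unsigned int irq = 27;			/* hypothetical IRQ number */
	void *bad  = (void *)irq;		/* warning: cast to pointer from
						 * integer of different size */
	void *good = (void *)(long)irq;		/* clean */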
@@ -48,8 +48,9 @@ __asm__(".arch_extension	virt");
 #endif

 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+#endif
 unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];

 /* The VMID used in the VTTBR */
@@ -1332,6 +1333,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 }

+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 static unsigned long nvhe_percpu_size(void)
 {
 	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
@@ -1379,9 +1381,11 @@ static int kvm_map_vectors(void)

 	return 0;
 }
+#endif

 static void cpu_init_hyp_mode(void)
 {
+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 	phys_addr_t pgd_ptr;
 	unsigned long hyp_stack_ptr;
 	unsigned long vector_ptr;
@@ -1423,12 +1427,15 @@ static void cpu_init_hyp_mode(void)
 	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
 		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
 	}
+#endif
 }

 static void cpu_hyp_reset(void)
 {
+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 	if (!is_kernel_in_hyp_mode())
 		__hyp_reset_vectors();
+#endif
 }

 static void cpu_hyp_reinit(void)
@@ -1575,6 +1582,7 @@ static int init_subsystems(void)
 	if (err)
 		goto out;

+	register_kvm_pmu_events_handler(kvm_set_pmu_events, kvm_clr_pmu_events);
 	kvm_perf_init();
 	kvm_coproc_table_init();

@@ -1584,6 +1592,7 @@ out:
 	return err;
 }

+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 static void teardown_hyp_mode(void)
 {
 	int cpu;
@@ -1709,6 +1718,7 @@ out_err:
 	kvm_err("error initializing Hyp mode: %d\n", err);
 	return err;
 }
+#endif

 static void check_kvm_target_cpu(void *ret)
 {
@@ -1810,11 +1820,13 @@ int kvm_arch_init(void *opaque)
 	if (err)
 		return err;

+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 	if (!in_hyp_mode) {
 		err = init_hyp_mode();
 		if (err)
 			goto out_err;
 	}
+#endif

 	err = init_subsystems();
 	if (err)
@@ -1829,15 +1841,18 @@ int kvm_arch_init(void *opaque)

 out_hyp:
 	hyp_cpu_pm_exit();
+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 	if (!in_hyp_mode)
 		teardown_hyp_mode();
 out_err:
+#endif
 	return err;
 }

 /* NOP: Compiling as a module not supported */
 void kvm_arch_exit(void)
 {
+	unregister_kvm_pmu_events_handler();
 	kvm_perf_teardown();
 	kvm_timer_hyp_uninit();
 	kvm_vgic_hyp_uninit();
@@ -1846,8 +1861,25 @@ void kvm_arch_exit(void)

 static int arm_init(void)
 {
-	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	int rc;
+
+	if (IS_MODULE(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
+		kvm_err("kvm arm kernel module only supports for VHE system\n");
+		return -ENODEV;
+	}
+
+	rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	if (!rc)
+		kvm_info("init kvm-arm successfully\n");
 	return rc;
 }

 module_init(arm_init);
+
+static void arm_exit(void)
+{
+	kvm_exit();
+	kvm_info("exit kvm-arm successfully\n");
+}
+
+module_exit(arm_exit);
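With the guard in arm_init() above, the module path mirrors the built-in one except for hyp setup, which modular (VHE-only) loads skip entirely. A usage sketch, assuming the module keeps the name of the Makefile's kvm.o target:

	# on a VHE host
	modprobe kvm		# dmesg: "init kvm-arm successfully"
	modprobe -r kvm		# dmesg: "exit kvm-arm successfully"

	# on a non-VHE host the load is refused with -ENODEV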
@@ -4,6 +4,9 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */

+#if defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
+#define __KVM_VHE_HYPERVISOR__
+#endif
 #include <linux/linkage.h>

 #include <asm/alternative.h>
@@ -4,6 +4,9 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */

+#if defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
+#define __KVM_VHE_HYPERVISOR__
+#endif
 #include <linux/arm-smccc.h>
 #include <linux/linkage.h>

@@ -191,6 +194,7 @@ SYM_CODE_START(__kvm_hyp_vector)
 	valid_vect	el1_error		// Error 32-bit EL1
 SYM_CODE_END(__kvm_hyp_vector)

+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 .macro hyp_ventry
 	.align 7
 1:	esb
@@ -240,3 +244,4 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
 1:	.org	__bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
 	.org	1b
 SYM_CODE_END(__bp_harden_hyp_vecs)
+#endif
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Google LLC.
+ * Written by David Brazdil <dbrazdil@google.com>
+ *
+ * Linker script used for partial linking of nVHE EL2 object files.
+ */
+
+#include <asm/hyp_image.h>
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/memory.h>
+
+SECTIONS {
+	. = ALIGN(SZ_8);			\
+	__kvm_ex_table : {			\
+		__start___kvm_ex_table = .;	\
+		*(__kvm_ex_table)		\
+		__stop___kvm_ex_table = .;	\
+	}
+}
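This linker script is what the Makefile's partial-link rule feeds to ld -r -T: it collects the world switch's __kvm_ex_table entries between start/stop markers instead of letting them scatter. The markers are then consumed by the hyp fault-fixup path, roughly (sketch):

	extern struct exception_table_entry __start___kvm_ex_table;
	extern struct exception_table_entry __stop___kvm_ex_table;

	/* walk [__start___kvm_ex_table, __stop___kvm_ex_table) to find
	 * a fixup address for a faulting hyp instruction */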
@@ -4,6 +4,9 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */

+#if defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
+#define __KVM_VHE_HYPERVISOR__
+#endif
 #include <linux/arm-smccc.h>
 #include <linux/linkage.h>
@@ -4,7 +4,10 @@
 #

 asflags-y := -D__KVM_VHE_HYPERVISOR__
-ccflags-y := -D__KVM_VHE_HYPERVISOR__
+ccflags-y := -D__KVM_VHE_HYPERVISOR__ \
+	     -fno-stack-protector \
+	     -DDISABLE_BRANCH_PROFILING \
+	     $(DISABLE_STACKLEAK_PLUGIN)

 obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
@@ -4,6 +4,9 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */

+#if defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
+#define __KVM_VHE_HYPERVISOR__
+#endif
 #include <hyp/switch.h>

 #include <linux/arm-smccc.h>
@@ -4,6 +4,9 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */

+#if defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
+#define __KVM_VHE_HYPERVISOR__
+#endif
 #include <hyp/sysreg-sr.h>

 #include <linux/compiler.h>
@@ -25,9 +25,11 @@
 static struct kvm_pgtable *hyp_pgtable;
 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
+#endif

 static unsigned long io_map_base;

@@ -1188,6 +1190,7 @@ phys_addr_t kvm_mmu_get_httbr(void)
 	return __pa(hyp_pgtable->pgd);
 }

+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 phys_addr_t kvm_get_idmap_vector(void)
 {
 	return hyp_idmap_vector;
@@ -1267,6 +1270,7 @@ out_free_pgtable:
 out:
 	return err;
 }
+#endif

 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   const struct kvm_userspace_memory_region *mem,
@@ -12,10 +12,12 @@
 #include <asm/insn.h>
 #include <asm/kvm_mmu.h>

+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 /*
  * The LSB of the HYP VA tag
  */
 static u8 tag_lsb;
 /*
  * The HYP VA tag value with the region bit
  */
@@ -97,7 +99,7 @@ static u32 compute_instruction(int n, u32 rd, u32 rn)
 	return insn;
 }

-void __init kvm_update_va_mask(struct alt_instr *alt,
+void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst)
 {
 	int i;
@@ -130,10 +132,12 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 		updptr[i] = cpu_to_le32(insn);
 	}
 }
+#endif

 void *__kvm_bp_vect_base;
 int __kvm_harden_el2_vector_slot;

+#if !defined(CONFIG_KVM_ARM_HOST_VHE_ONLY)
 void kvm_patch_vector_branch(struct alt_instr *alt,
 			     __le32 *origptr, __le32 *updptr, int nr_inst)
 {
@@ -201,3 +205,4 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 				     AARCH64_INSN_BRANCH_NOLINK);
 	*updptr++ = cpu_to_le32(insn);
 }
+#endif
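Dropping __init from kvm_update_va_mask() matters once this file can be built into a module, presumably because the function may then be reached outside the kernel's boot-time init phase; __init code lives in .init.text and is discarded after boot. Sketch of the distinction:

	static int __init boot_only(void)	/* .init.text, freed after boot */
	{
		return 0;
	}

	int callable_anytime(void)		/* normal .text, safe for late callers */
	{
		return 0;
	}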
@@ -80,6 +80,7 @@ int pmd_huge(pmd_t pmd)
 {
 	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
 }
+EXPORT_SYMBOL(pmd_huge);

 int pud_huge(pud_t pud)
 {
@@ -89,6 +90,7 @@ int pud_huge(pud_t pud)
 	return 0;
 #endif
 }
+EXPORT_SYMBOL(pud_huge);

 /*
  * Select all bits except the pfn
@@ -42,7 +42,9 @@
 #define NO_CONT_MAPPINGS	BIT(1)

 u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
+EXPORT_SYMBOL(idmap_t0sz);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
+EXPORT_SYMBOL(idmap_ptrs_per_pgd);

 u64 __section(".mmuoff.data.write") vabits_actual;
 EXPORT_SYMBOL(vabits_actual);
@@ -79,6 +81,8 @@ void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
 	pgd_clear_fixmap();
 	spin_unlock(&swapper_pgdir_lock);
 }
+EXPORT_SYMBOL(set_swapper_pgd);
+EXPORT_SYMBOL(swapper_pg_dir);

 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
@@ -995,6 +995,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
 {
 	return &arch_timer_kvm_info;
 }
+EXPORT_SYMBOL(arch_timer_get_kvm_info);

 static void __init arch_counter_register(unsigned type)
 {
@@ -31,7 +31,10 @@ obj-$(CONFIG_ARM_GIC_PM)		+= irq-gic-pm.o
 obj-$(CONFIG_ARCH_REALVIEW)		+= irq-gic-realview.o
 obj-$(CONFIG_ARM_GIC_V2M)		+= irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o
+ifndef CONFIG_KVM_ARM_HOST_VHE_ONLY
+obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v4.o
+endif
 obj-$(CONFIG_ARM_GIC_V3_ITS_PCI)	+= irq-gic-v3-its-pci-msi.o
 obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC)	+= irq-gic-v3-its-fsl-mc-msi.o
 obj-$(CONFIG_ARM_GIC_PHYTIUM_2500)	+= irq-gic-phytium-2500.o irq-gic-phytium-2500-its.o
@@ -18,6 +18,7 @@ const struct gic_kvm_info *gic_get_kvm_info(void)
 {
 	return gic_kvm_info;
 }
+EXPORT_SYMBOL(gic_get_kvm_info);

 void gic_set_kvm_info(const struct gic_kvm_info *info)
 {
@@ -5502,6 +5502,22 @@ static void __init its_acpi_probe(void)
 static void __init its_acpi_probe(void) { }
 #endif

+static int its_init_v4(struct irq_domain *domain,
+		       const struct irq_domain_ops *ops,
+		       const struct irq_domain_ops *sgi_ops)
+{
+	if (domain) {
+		pr_info("ITS: Enabling GICv4 support\n");
+		gic_domain = domain;
+		vpe_domain_ops = ops;
+		sgi_domain_ops = sgi_ops;
+		return 0;
+	}
+
+	pr_err("ITS: No GICv4 VPE domain allocated\n");
+	return -ENODEV;
+}
+
 int __init phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists,
 			    struct irq_domain *parent_domain)
 {
@@ -5386,6 +5386,29 @@ static void __init its_acpi_probe(void)
 static void __init its_acpi_probe(void) { }
 #endif

+struct irq_domain *gic_domain;
+EXPORT_SYMBOL(gic_domain);
+const struct irq_domain_ops *vpe_domain_ops;
+EXPORT_SYMBOL(vpe_domain_ops);
+const struct irq_domain_ops *sgi_domain_ops;
+EXPORT_SYMBOL(sgi_domain_ops);
+
+static int its_init_v4(struct irq_domain *domain,
+		       const struct irq_domain_ops *ops,
+		       const struct irq_domain_ops *sgi_ops)
+{
+	if (domain) {
+		pr_info("ITS: Enabling GICv4 support\n");
+		gic_domain = domain;
+		vpe_domain_ops = ops;
+		sgi_domain_ops = sgi_ops;
+		return 0;
+	}
+
+	pr_err("ITS: No GICv4 VPE domain allocated\n");
+	return -ENODEV;
+}
+
 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
 		    struct irq_domain *parent_domain)
 {
@@ -83,10 +83,6 @@
  * performing INVALL operations.
  */

-static struct irq_domain *gic_domain;
-static const struct irq_domain_ops *vpe_domain_ops;
-static const struct irq_domain_ops *sgi_domain_ops;
-
 static bool has_v4_1(void)
 {
 	return !!sgi_domain_ops;
@@ -177,6 +173,7 @@ err:

 	return -ENOMEM;
 }
+EXPORT_SYMBOL(its_alloc_vcpu_irqs);

 static void its_free_sgi_irqs(struct its_vm *vm)
 {
@@ -204,6 +201,7 @@ void its_free_vcpu_irqs(struct its_vm *vm)
 	irq_domain_remove(vm->domain);
 	irq_domain_free_fwnode(vm->fwnode);
 }
+EXPORT_SYMBOL(its_free_vcpu_irqs);

 static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
 {
@@ -285,6 +283,7 @@ int its_invall_vpe(struct its_vpe *vpe)

 	return its_send_vpe_cmd(vpe, &info);
 }
+EXPORT_SYMBOL(its_invall_vpe);

 int its_map_vlpi(int irq, struct its_vlpi_map *map)
 {
@@ -308,6 +307,7 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)

 	return ret;
 }
+EXPORT_SYMBOL(its_map_vlpi);

 int its_get_vlpi(int irq, struct its_vlpi_map *map)
 {
@@ -320,12 +320,14 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map)

 	return irq_set_vcpu_affinity(irq, &info);
 }
+EXPORT_SYMBOL(its_get_vlpi);

 int its_unmap_vlpi(int irq)
 {
 	irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
 	return irq_set_vcpu_affinity(irq, NULL);
 }
+EXPORT_SYMBOL(its_unmap_vlpi);

 int its_prop_update_vlpi(int irq, u8 config, bool inv)
 {
@@ -338,6 +340,7 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)

 	return irq_set_vcpu_affinity(irq, &info);
 }
+EXPORT_SYMBOL(its_prop_update_vlpi);

 int its_prop_update_vsgi(int irq, u8 priority, bool group)
 {
@@ -351,19 +354,3 @@ int its_prop_update_vsgi(int irq, u8 priority, bool group)

 	return irq_set_vcpu_affinity(irq, &info);
 }
-
-int its_init_v4(struct irq_domain *domain,
-		const struct irq_domain_ops *vpe_ops,
-		const struct irq_domain_ops *sgi_ops)
-{
-	if (domain) {
-		pr_info("ITS: Enabling GICv4 support\n");
-		gic_domain = domain;
-		vpe_domain_ops = vpe_ops;
-		sgi_domain_ops = sgi_ops;
-		return 0;
-	}
-
-	pr_err("ITS: No GICv4 VPE domain allocated\n");
-	return -ENODEV;
-}
@@ -141,8 +141,8 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv);
 int its_prop_update_vsgi(int irq, u8 priority, bool group);

 struct irq_domain_ops;
-int its_init_v4(struct irq_domain *domain,
-		const struct irq_domain_ops *vpe_ops,
-		const struct irq_domain_ops *sgi_ops);
+extern struct irq_domain *gic_domain;
+extern const struct irq_domain_ops *vpe_domain_ops;
+extern const struct irq_domain_ops *sgi_domain_ops;

 #endif
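The GICv4 glue is restructured rather than merely exported: its_init_v4() used to live in the v4 layer, which can now be linked into the KVM module, so each built-in ITS driver gains a local copy that fills in the now-global gic_domain/vpe_domain_ops/sgi_domain_ops, and the header above swaps the init prototype for extern declarations of those three. The v4 layer is left as a pure consumer, e.g.:

	/* sketch: the v4 layer only reads the domains the ITS driver set up */
	extern const struct irq_domain_ops *sgi_domain_ops;

	static bool has_v4_1(void)
	{
		return !!sgi_domain_ops;
	}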
@@ -1484,6 +1484,7 @@ out_free_desc:
 	irq_free_descs(virq, nr_irqs);
 	return ret;
 }
+EXPORT_SYMBOL(__irq_domain_alloc_irqs);

 /* The irq_data was moved, fix the revmap to refer to the new location */
 static void irq_domain_fix_revmap(struct irq_data *d)
@@ -1678,6 +1679,7 @@ void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
 	irq_domain_free_irq_data(virq, nr_irqs);
 	irq_free_descs(virq, nr_irqs);
 }
+EXPORT_SYMBOL(irq_domain_free_irqs);

 /**
  * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
@@ -1769,6 +1771,7 @@ int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
 		irqd_set_activated(irq_data);
 	return ret;
 }
+EXPORT_SYMBOL(irq_domain_activate_irq);

 /**
  * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
@@ -1785,6 +1788,7 @@ void irq_domain_deactivate_irq(struct irq_data *irq_data)
 		irqd_clr_activated(irq_data);
 	}
 }
+EXPORT_SYMBOL(irq_domain_deactivate_irq);

 static void irq_domain_check_hierarchy(struct irq_domain *domain)
 {
@@ -38,6 +38,7 @@ void pud_clear_bad(pud_t *pud)
 	pud_ERROR(*pud);
 	pud_clear(pud);
 }
+EXPORT_SYMBOL(pud_clear_bad);
 #endif

 /*