From 9c733b740b7722981d230f568874a6f8d9616356 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 11 Oct 2022 22:36:56 +0100 Subject: [PATCH 01/19] arm64/booting: Document boot requirements for FEAT_NMI commit c8b89f40f6393a3e2bb8892e23bb7f6da787b5e7 openEuler In order to use FEAT_NMI we must be able to use ALLINT, require that it behave as though not trapped when it is present. Signed-off-by: Mark Brown Signed-off-by: Jie Liu Signed-off-by: huwentao --- Documentation/arch/arm64/booting.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst index 30164fb24a24..3736b4fb4cb8 100644 --- a/Documentation/arch/arm64/booting.rst +++ b/Documentation/arch/arm64/booting.rst @@ -414,6 +414,12 @@ Before jumping into the kernel, the following conditions must be met: - HFGRWR_EL2.nPIRE0_EL1 (bit 57) must be initialised to 0b1. + For CPUs with Non-maskable Interrupts (FEAT_NMI): + + - If the kernel is entered at EL1 and EL2 is present: + + - HCRX_EL2.TALLINT must be initialised to 0b0. + The requirements described above for CPU mode, caches, MMUs, architected timers, coherency and system registers apply to all CPUs. All CPUs must enter the kernel in the same exception level. Where the values documented -- Gitee From 5601fa2d200eb89df71514a10781c45c444b9e91 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 11 Oct 2022 22:39:28 +0100 Subject: [PATCH 02/19] arm64/sysreg: Add definitions for immediate versions of MSR ALLINT commit abffad0b651f06d94330f8cadbdab00dd0c1c11e openEuler Encodings are provided for ALLINT which allow setting of ALLINT.ALLINT using an immediate rather than requiring that a register be loaded with the value to write. Since these don't currently fit within the scheme we have for sysreg generation add manual encodings like we currently do for other similar registers such as SVCR. 
Since it is required that these immediate versions be encoded with xzr as the source register, provide asm wrappers which ensure this is the case.
*/ +#define SYS_ALLINT_CLR sys_reg(0, 1, 4, 0, 0) +#define SYS_ALLINT_SET sys_reg(0, 1, 4, 1, 0) #define SYS_SVCR_SMSTOP_SM_EL0 sys_reg(0, 3, 4, 2, 3) #define SYS_SVCR_SMSTART_SM_EL0 sys_reg(0, 3, 4, 3, 3) #define SYS_SVCR_SMSTOP_SMZA_EL0 sys_reg(0, 3, 4, 6, 3) -- Gitee From 6329b99b0bec19a8555ff83293cdd198305e8fff Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 7 Oct 2022 12:17:01 +0100 Subject: [PATCH 03/19] arm64/asm: Introduce assembly macros for managing ALLINT commit 1c203efa0a421d6a2368653f017143a430137be9 openEuler In order to allow assembly code to ensure that not even superpriorty interrupts can preempt it provide macros for enabling and disabling ALLINT.ALLINT. This is not integrated into the existing DAIF macros since we do not always wish to manage ALLINT along with DAIF and the use of DAIF in the naming of the existing macros might lead to surprises if ALLINT is also managed. Signed-off-by: Mark Brown Signed-off-by: Jie Liu Signed-off-by: huwentao --- arch/arm64/include/asm/assembler.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 0fa067c2324d..c010f6df62e8 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -34,6 +34,22 @@ wx\n .req w\n .endr + .macro disable_allint +#ifdef CONFIG_ARM64_NMI +alternative_if ARM64_HAS_NMI + msr_s SYS_ALLINT_SET, xzr +alternative_else_nop_endif +#endif + .endm + + .macro enable_allint +#ifdef CONFIG_ARM64_NMI +alternative_if ARM64_HAS_NMI + msr_s SYS_ALLINT_CLR, xzr +alternative_else_nop_endif +#endif + .endm + .macro disable_daif msr daifset, #0xf .endm -- Gitee From 6ac3077d4d4eb459c0730dcc91797c65cc05097f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 6 Oct 2022 18:21:35 +0100 Subject: [PATCH 04/19] arm64/hyp-stub: Enable access to ALLINT commit 611b58ad24474484e47e99acbbcd34bce12726bd openEuler In order to use NMIs we need to ensure that traps are disabled for it so 
update HCRX_EL2 to ensure that TALLINT is not set when we detect support for NMIs. Signed-off-by: Mark Brown Signed-off-by: Jie Liu Signed-off-by: huwentao --- arch/arm64/kernel/hyp-stub.S | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index 65f76064c86b..731344c90871 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -76,6 +76,18 @@ SYM_CODE_END(elx_sync) SYM_CODE_START_LOCAL(__finalise_el2) finalise_el2_state + // NMIs + check_override id_aa64pfr1 ID_AA64PFR1_EL1_NMI_SHIFT .Linit_nmi .Lskip_nmi x1 x2 +.Linit_nmi: + mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present? + ubfx x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4 + cbz x1, .Lskip_nmi + + mrs_s x1, SYS_HCRX_EL2 + bic x1, x1, #HCRX_EL2_TALLINT_MASK // Don't trap ALLINT + msr_s SYS_HCRX_EL2, x1 +.Lskip_nmi: + // nVHE? No way! Give me the real thing! // Sanity check: MMU *must* be off mrs x1, sctlr_el2 -- Gitee From 8e1aa56f7a2d1784bec28f19063d214abebc8f7c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 4 Nov 2022 18:08:06 +0000 Subject: [PATCH 05/19] arm64/idreg: Add an override for FEAT_NMI commit 7c694a36239a15ccdb2806ad7a32a43d7a26fe3f openEuler Add a named override for FEAT_NMI, allowing it to be explicitly disabled in case of problems. 
Signed-off-by: Mark Brown Signed-off-by: Jie Liu Signed-off-by: huwentao --- arch/arm64/kernel/idreg-override.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index 3addc09f8746..6f248ef8c9d2 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -100,6 +100,7 @@ static const struct ftr_set_desc pfr1 __initconst = { .fields = { FIELD("bt", ID_AA64PFR1_EL1_BT_SHIFT, NULL ), FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL), + FIELD("nmi", ID_AA64PFR1_EL1_NMI_SHIFT, NULL), FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter), {} }, -- Gitee From bdd5715c8b108fcc934dc963c95fd5797ea3f722 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 3 Nov 2022 15:50:08 +0000 Subject: [PATCH 06/19] arm64/cpufeature: Detect PE support for FEAT_NMI commit f72387e1f8bc992a1b8c6ecc37c90c458e92d455 openEuler Use of FEAT_NMI requires that all the PEs in the system and the GIC have NMI support. This patch implements the PE part of that detection. In order to avoid problematic interactions between real and pseudo NMIs we disable the architected feature if the user has enabled pseudo NMIs on the command line. If this is done on a system where support for the architected feature is detected then a warning is printed during boot in order to help users spot what is likely to be a misconfiguration. In order to allow KVM to offer the feature to guests even if pseudo NMIs are in use by the host we have a separate feature for the raw feature which is used in KVM. 
Signed-off-by: Mark Brown Signed-off-by: Jie Liu Signed-off-by: huwentao --- arch/arm64/include/asm/cpufeature.h | 6 +++ arch/arm64/kernel/cpufeature.c | 66 ++++++++++++++++++++++++++++- arch/arm64/tools/cpucaps | 2 + 3 files changed, 73 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 5bba39376055..53343c6cba58 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -808,6 +808,12 @@ static __always_inline bool system_uses_irq_prio_masking(void) cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING); } +static __always_inline bool system_uses_nmi(void) +{ + return IS_ENABLED(CONFIG_ARM64_NMI) && + cpus_have_const_cap(ARM64_USES_NMI); +} + static inline bool system_supports_mte(void) { return IS_ENABLED(CONFIG_ARM64_MTE) && diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 8a105aae29b5..9dc761612ca2 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -85,6 +85,7 @@ #include #include #include +#include #include #include #include @@ -255,6 +256,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_NMI_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0), @@ -2126,9 +2128,11 @@ static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap) } #endif /* CONFIG_ARM64_E0PD */ -#ifdef CONFIG_ARM64_PSEUDO_NMI +#if IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) || IS_ENABLED(CONFIG_ARM64_NMI) static bool enable_pseudo_nmi; +#endif +#ifdef CONFIG_ARM64_PSEUDO_NMI static int __init early_enable_pseudo_nmi(char *p) { return kstrtobool(p, &enable_pseudo_nmi); @@ -2178,6 +2182,41 @@ static bool 
has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry } #endif +#ifdef CONFIG_ARM64_NMI +static bool use_nmi(const struct arm64_cpu_capabilities *entry, int scope) +{ + if (!has_cpuid_feature(entry, scope)) + return false; + + /* + * Having both real and pseudo NMIs enabled simultaneously is + * likely to cause confusion. Since pseudo NMIs must be + * enabled with an explicit command line option, if the user + * has set that option on a system with real NMIs for some + * reason assume they know what they're doing. + */ + if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && enable_pseudo_nmi) { + pr_info("Pseudo NMI enabled, not using architected NMI\n"); + return false; + } + + return true; +} + +static void nmi_enable(const struct arm64_cpu_capabilities *__unused) +{ + /* + * Enable use of NMIs controlled by ALLINT, SPINTMASK should + * be clear by default but make it explicit that we are using + * this mode. Ensure that ALLINT is clear first in order to + * avoid leaving things masked. 
+ */ + _allint_clear(); + sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPINTMASK, SCTLR_EL1_NMI); + isb(); +} +#endif + #ifdef CONFIG_ARM64_BTI static void bti_enable(const struct arm64_cpu_capabilities *__unused) { @@ -2757,6 +2796,31 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP) }, +#ifdef CONFIG_ARM64_NMI + { + .desc = "Non-maskable Interrupts present", + .capability = ARM64_HAS_NMI, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, + .sys_reg = SYS_ID_AA64PFR1_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64PFR1_EL1_NMI_SHIFT, + .field_width = 4, + .min_field_value = ID_AA64PFR1_EL1_NMI_IMP, + .matches = has_cpuid_feature, + }, + { + .desc = "Non-maskable Interrupts enabled", + .capability = ARM64_USES_NMI, + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, + .sys_reg = SYS_ID_AA64PFR1_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64PFR1_EL1_NMI_SHIFT, + .field_width = 4, + .min_field_value = ID_AA64PFR1_EL1_NMI_IMP, + .matches = use_nmi, + .cpu_enable = nmi_enable, + }, +#endif {}, }; diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 676cc51f0d99..0a57e4030173 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -39,6 +39,7 @@ HAS_LDAPR HAS_LSE_ATOMICS HAS_MOPS HAS_NESTED_VIRT +HAS_NMI HAS_NO_FPSIMD HAS_NO_HW_PREFETCH HAS_PAN @@ -68,6 +69,7 @@ SPECTRE_BHB SSBS SVE UNMAP_KERNEL_AT_EL0 +USES_NMI WORKAROUND_834220 WORKAROUND_843419 WORKAROUND_845719 -- Gitee From f8574b77496cdb121489dd2d62785947b446f6b4 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 11 Nov 2022 18:45:29 +0000 Subject: [PATCH 07/19] KVM: arm64: Hide FEAT_NMI from guests commit 032d8169dbdc3c12142f3dc5b58b9c6d1a52fa59 openEuler FEAT_NMI is not yet useful to guests pending implementation of vGIC support. Mask out the feature from the ID register and prevent guests creating state in ALLINT.ALLINT by activating the trap on write provided in HCRX_EL2.TALLINT when they are running. 
There is no trap available for reads from ALLINT. We do not need to check for FEAT_HCRX since it is mandatory since v8.7 and FEAT_NMI is a v8.8 feature. Signed-off-by: Mark Brown Signed-off-by: Jie Liu Signed-off-by: huwentao --- arch/arm64/kvm/hyp/include/hyp/switch.h | 6 ++++++ arch/arm64/kvm/sys_regs.c | 1 + 2 files changed, 7 insertions(+) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 526085401f66..84c2d5873d2c 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -194,6 +194,9 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) vcpu_set_flag(vcpu, PMUSERENR_ON_CPU); } + if (cpus_have_final_cap(ARM64_HAS_NMI)) + sysreg_clear_set_s(SYS_HCRX_EL2, 0, HCRX_EL2_TALLINT); + vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2); write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); @@ -218,6 +221,9 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) { write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2); + if (cpus_have_final_cap(ARM64_HAS_NMI)) + sysreg_clear_set_s(SYS_HCRX_EL2, HCRX_EL2_TALLINT, 0); + write_sysreg(0, hstr_el2); if (kvm_arm_support_pmu_v3()) { struct kvm_cpu_context *hctxt; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 2c4f4d4a1f55..6ac954c27219 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1372,6 +1372,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac); break; case SYS_ID_AA64ISAR1_EL1: -- Gitee From 251644356abaf7284c31461a1ca813ada2817d60 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 11 Nov 2022 13:35:25 +0000 Subject: [PATCH 08/19] arm64/nmi: Manage masking for superpriority interrupts along with DAIF commit 
dd8b74f042237727edef1187ffa936c3686d300b openEuler As we do for pseudo NMIs add code to our DAIF management which keeps superpriority interrupts unmasked when we have asynchronous exceptions enabled. Since superpriority interrupts are not masked through DAIF like pseduo NMIs are we also need to modify the assembler macros for managing DAIF to ensure that the masking is done in the assembly code. At present users of the assembly macros always mask pseudo NMIs. There is a difference to the actual handling between pseudo NMIs and superpriority interrupts in the assembly save_and_disable_irq and restore_irq macros, these cover both interrupts and FIQs using DAIF without regard for the use of pseudo NMIs so also mask those but are not updated here to mask superpriority interrupts. Given the names it is not clear that the behaviour with pseudo NMIs is particularly intentional, and in any case these macros are only used in the implementation of alternatives for software PAN while hardware PAN has been mandatory since v8.1 so it is not anticipated that practical systems with support for FEAT_NMI will ever execute the affected code. This should be a conservative set of masked regions, we may be able to relax this in future, but this should represent a good starting point. 
Signed-off-by: Mark Brown Signed-off-by: Jie Liu Signed-off-by: huwentao --- arch/arm64/include/asm/assembler.h | 2 ++ arch/arm64/include/asm/daifflags.h | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index c010f6df62e8..979ba9ddee41 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -51,11 +51,13 @@ alternative_else_nop_endif .endm .macro disable_daif + disable_allint msr daifset, #0xf .endm .macro enable_daif msr daifclr, #0xf + enable_allint .endm /* diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index b3bed2004342..2417cc6b1631 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #define DAIF_PROCCTX 0 @@ -35,6 +36,9 @@ static inline void local_daif_mask(void) if (system_uses_irq_prio_masking()) gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); + if (system_uses_nmi()) + _allint_set(); + trace_hardirqs_off(); } @@ -116,6 +120,14 @@ static inline void local_daif_restore(unsigned long flags) write_sysreg(flags, daif); + /* If we can take asynchronous errors we can take NMIs */ + if (system_uses_nmi()) { + if (flags & PSR_A_BIT) + _allint_set(); + else + _allint_clear(); + } + if (irq_disabled) trace_hardirqs_off(); } @@ -140,6 +152,14 @@ static inline void local_daif_inherit(struct pt_regs *regs) * use the pmr instead. 
*/ write_sysreg(flags, daif); + + /* The ALLINT field is at the same position in pstate and ALLINT */ + if (system_uses_nmi()) { + if (regs->pstate & ALLINT_ALLINT) + _allint_set(); + else + _allint_clear(); + } } #endif -- Gitee From 7d9e3d2d073930ffd7f59edf172730c61d04dd45 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 12 Dec 2022 13:43:36 +0000 Subject: [PATCH 09/19] arm64/entry: Don't call preempt_schedule_irq() with NMIs masked commit 2117a37e18658d788b7eee9904b488570086b987 openEuler As we do for pseudo NMIs don't call preempt_schedule_irq() when architechted NMIs are masked. If they are masked then we are calling from a preempting context so skip preemption. Signed-off-by: Mark Brown Signed-off-by: Jie Liu Signed-off-by: huwentao --- arch/arm64/kernel/entry-common.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 5e8204d250b4..60051ba0cf21 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -246,6 +246,15 @@ static void __sched arm64_preempt_schedule_irq(void) if (READ_ONCE(current_thread_info()->preempt_count) != 0) return; + /* + * Architected NMIs are unmasked prior to handling regular + * IRQs and masked while handling FIQs. If ALLINT is set then + * we are in a NMI or other preempting context so skip + * preemption. + */ + if (system_uses_nmi() && (read_sysreg_s(SYS_ALLINT) & ALLINT_ALLINT)) + return; + /* * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC * priority masking is used the GIC irqchip driver will clear DAIF.IF -- Gitee From 7d744790cdefd3b4ef7d20de9b98ae57091eed87 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 2 Nov 2022 21:13:07 +0000 Subject: [PATCH 10/19] arm64/irq: Document handling of FEAT_NMI in irqflags.h commit feb4809ad8f4b5d14c643eff47ac208432eac95f openEuler We have documentation at the top of irqflags.h which explains the DAIF masking. 
Since the additional masking with NMIs is related and also covers the IF in DAIF extend the comment to note what's going on with NMIs though none of the code in irqflags.h is updated to handle NMIs. Signed-off-by: Mark Brown Signed-off-by: Jie Liu Signed-off-by: huwentao --- arch/arm64/include/asm/irqflags.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index 1f31ec146d16..a9f117fb43c6 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -19,6 +19,16 @@ * always masked and unmasked together, and have no side effects for other * flags. Keeping to this order makes it easier for entry.S to know which * exceptions should be unmasked. + * + * With the addition of the FEAT_NMI extension we gain an additional + * class of superpriority IRQ/FIQ which is separately masked with a + * choice of modes controlled by SCTLR_ELn.{SPINTMASK,NMI}. Linux + * sets SPINTMASK to 0 and NMI to 1 which results in ALLINT.ALLINT + * masking both superpriority interrupts and IRQ/FIQ regardless of the + * I and F settings. Since these superpriority interrupts are being + * used as NMIs we do not include them in the interrupt masking here, + * anything that requires that NMIs be masked needs to explicitly do + * so. */ static __always_inline bool __irqflags_uses_pmr(void) -- Gitee From 4e180466cc01c3a0780a1248a5541c13ab8e5afd Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 11 Oct 2022 18:53:47 +0100 Subject: [PATCH 11/19] arm64/nmi: Add handling of superpriority interrupts as NMIs commit 8f1897e1dfc2c616c9ef834494c24755b8852be2 openEuler Our goal with superpriority interrupts is to use them as NMIs, taking advantage of the much smaller regions where they are masked to allow prompt handling of the most time critical interrupts. 
When an interrupt is configured with superpriority we will enter EL1 as normal for any interrupt, the presence of a superpriority interrupt is indicated with a status bit in ISR_EL1.
void (*handle_arch_nmi_irq)(struct pt_regs *regs); +extern void (*handle_arch_nmi_fiq)(struct pt_regs *regs); extern void (*handle_arch_irq)(struct pt_regs *); extern void (*handle_arch_fiq)(struct pt_regs *); @@ -517,6 +519,14 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs) } } +static __always_inline void __el1_nmi(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + arm64_enter_nmi(regs); + do_interrupt_handler(regs, handler); + arm64_exit_nmi(regs); +} + static __always_inline void __el1_pnmi(struct pt_regs *regs, void (*handler)(struct pt_regs *)) { @@ -538,9 +548,17 @@ static __always_inline void __el1_irq(struct pt_regs *regs, exit_to_kernel_mode(regs); } -static void noinstr el1_interrupt(struct pt_regs *regs, - void (*handler)(struct pt_regs *)) + +static void noinstr el1_interrupt(struct pt_regs *regs, u64 nmi_flag, + void (*handler)(struct pt_regs *), + void (*nmi_handler)(struct pt_regs *)) { + /* Is there a NMI to handle? */ + if (system_uses_nmi() && (read_sysreg(isr_el1) & nmi_flag)) { + __el1_nmi(regs, nmi_handler); + return; + } + write_sysreg(DAIF_PROCCTX_NOIRQ, daif); if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) @@ -551,12 +569,12 @@ static void noinstr el1_interrupt(struct pt_regs *regs, asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs) { - el1_interrupt(regs, handle_arch_irq); + el1_interrupt(regs, ISR_EL1_IS, handle_arch_irq, handle_arch_nmi_irq); } asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs) { - el1_interrupt(regs, handle_arch_fiq); + el1_interrupt(regs, ISR_EL1_FS, handle_arch_fiq, handle_arch_nmi_fiq); } asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs) @@ -779,11 +797,28 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs) } } -static void noinstr el0_interrupt(struct pt_regs *regs, - void (*handler)(struct pt_regs *)) +static void noinstr el0_interrupt(struct pt_regs *regs, u64 nmi_flag, + void 
(*handler)(struct pt_regs *), + void (*nmi_handler)(struct pt_regs *)) { enter_from_user_mode(regs); + /* Is there a NMI to handle? */ + if (system_uses_nmi() && (read_sysreg(isr_el1) & nmi_flag)) { + /* + * Any system with FEAT_NMI should have FEAT_CSV2 and + * not be affected by Spectre v2 so we don't mitigate + * here. + */ + + arm64_enter_nmi(regs); + do_interrupt_handler(regs, nmi_handler); + arm64_exit_nmi(regs); + + exit_to_user_mode(regs); + return; + } + write_sysreg(DAIF_PROCCTX_NOIRQ, daif); if (regs->pc & BIT(55)) @@ -798,7 +833,7 @@ static void noinstr el0_interrupt(struct pt_regs *regs, static void noinstr __el0_irq_handler_common(struct pt_regs *regs) { - el0_interrupt(regs, handle_arch_irq); + el0_interrupt(regs, ISR_EL1_IS, handle_arch_irq, handle_arch_nmi_irq); } asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs) @@ -808,7 +843,7 @@ asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs) static void noinstr __el0_fiq_handler_common(struct pt_regs *regs) { - el0_interrupt(regs, handle_arch_fiq); + el0_interrupt(regs, ISR_EL1_FS, handle_arch_fiq, handle_arch_nmi_fiq); } asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs) diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 85087e2df564..b1f2a9b49039 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -87,6 +87,16 @@ void do_softirq_own_stack(void) } #endif +static void default_handle_nmi_irq(struct pt_regs *regs) +{ + panic("Superpriority IRQ taken without a root NMI IRQ handler\n"); +} + +static void default_handle_nmi_fiq(struct pt_regs *regs) +{ + panic("Superpriority FIQ taken without a root NMI FIQ handler\n"); +} + static void default_handle_irq(struct pt_regs *regs) { panic("IRQ taken without a root IRQ handler\n"); @@ -97,9 +107,21 @@ static void default_handle_fiq(struct pt_regs *regs) panic("FIQ taken without a root FIQ handler\n"); } +void (*handle_arch_nmi_irq)(struct pt_regs *) __ro_after_init = 
default_handle_nmi_irq; +void (*handle_arch_nmi_fiq)(struct pt_regs *) __ro_after_init = default_handle_nmi_fiq; void (*handle_arch_irq)(struct pt_regs *) __ro_after_init = default_handle_irq; void (*handle_arch_fiq)(struct pt_regs *) __ro_after_init = default_handle_fiq; +int __init set_handle_nmi_irq(void (*handle_nmi_irq)(struct pt_regs *)) +{ + if (handle_arch_nmi_irq != default_handle_nmi_irq) + return -EBUSY; + + handle_arch_nmi_irq = handle_nmi_irq; + pr_info("Root superpriority IRQ handler: %ps\n", handle_nmi_irq); + return 0; +} + int __init set_handle_irq(void (*handle_irq)(struct pt_regs *)) { if (handle_arch_irq != default_handle_irq) -- Gitee From 266b413f1b9ceff35500878362e771b026f00d7c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 11 Oct 2022 12:57:00 +0100 Subject: [PATCH 12/19] arm64/nmi: Add Kconfig for NMI commit 77ca8065cb64d1066aa2276e6aecb7c176210922 openEuler Since NMI handling is in some fairly hot paths we provide a Kconfig option which allows support to be compiled out when not needed. Signed-off-by: Mark Brown Signed-off-by: Jie Liu Signed-off-by: huwentao --- arch/arm64/Kconfig | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 047e934d69a4..4ca7d468e962 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2178,6 +2178,23 @@ config ARM64_EPAN if the cpu does not implement the feature. endmenu # "ARMv8.7 architectural features" +menu "ARMv8.8 architectural features" + +config ARM64_NMI + bool "Enable support for Non-maskable Interrupts (NMI)" + default y + help + Non-maskable interrupts are an architecture and GIC feature + which allow the system to configure some interrupts to be + configured to have superpriority, allowing them to be handled + before other interrupts and masked for shorter periods of time. + + The feature is detected at runtime, and will remain disabled + if the cpu does not implement the feature. 
It will also be + disabled if pseudo NMIs are enabled at runtime. + +endmenu # "ARMv8.8 architectural features" + config ARM64_SVE bool "ARM Scalable Vector Extension support" default y -- Gitee From 2731a6c26d4e1bf4502c5f64a7f0bd45c4ccca7a Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 13 Oct 2022 16:05:33 +0200 Subject: [PATCH 13/19] irqchip/gic-v3: Implement FEAT_GICv3_NMI support commit 0408b5bc43005221087db0b668aed30d06be9672 openEuler The FEAT_GICv3_NMI GIC feature coupled with the CPU FEAT_NMI enables handling NMI interrupts in HW on aarch64, by adding a superpriority interrupt to the existing GIC priority scheme. Implement GIC driver support for the FEAT_GICv3_NMI feature. Rename gic_supports_nmi() helper function to gic_supports_pseudo_nmis() to make the pseudo NMIs code path clearer and more explicit. Check, through the ARM64 capabilitity infrastructure, if support for FEAT_NMI was detected on the core and the system has not overridden the detection and forced pseudo-NMIs enablement. If FEAT_NMI is detected, it was not overridden (check embedded in the system_uses_nmi() call) and the GIC supports the FEAT_GICv3_NMI feature, install an NMI handler and initialize NMIs related HW GIC registers. 
Signed-off-by: Lorenzo Pieralisi Signed-off-by: Mark Brown Signed-off-by: Jie Liu Signed-off-by: huwentao --- drivers/irqchip/irq-gic-v3.c | 153 ++++++++++++++++++++++++----- include/linux/irqchip/arm-gic-v3.h | 4 + 2 files changed, 130 insertions(+), 27 deletions(-) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 6c7943c516eb..442f474ec656 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -61,6 +61,7 @@ struct gic_chip_data { u32 nr_redist_regions; u64 flags; bool has_rss; + bool has_nmi; unsigned int ppi_nr; struct partition_desc **ppi_descs; }; @@ -149,6 +150,20 @@ enum gic_intid_range { __INVALID_RANGE__ }; +#ifdef CONFIG_ARM64 +#include + +static inline bool has_v3_3_nmi(void) +{ + return gic_data.has_nmi && system_uses_nmi(); +} +#else +static inline bool has_v3_3_nmi(void) +{ + return false; +} +#endif + static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) { switch (hwirq) { @@ -387,6 +402,42 @@ static int gic_peek_irq(struct irq_data *d, u32 offset) return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask); } +static DEFINE_RAW_SPINLOCK(irq_controller_lock); + +static void gic_irq_configure_nmi(struct irq_data *d, bool enable) +{ + void __iomem *base, *addr; + u32 offset, index, mask, val; + + offset = convert_offset_index(d, GICD_INMIR, &index); + mask = 1 << (index % 32); + + if (gic_irq_in_rdist(d)) + base = gic_data_rdist_sgi_base(); + else + base = gic_data.dist_base; + + addr = base + offset + (index / 32) * 4; + + raw_spin_lock(&irq_controller_lock); + + val = readl_relaxed(addr); + val = enable ? 
(val | mask) : (val & ~mask); + writel_relaxed(val, addr); + + raw_spin_unlock(&irq_controller_lock); +} + +static void gic_irq_enable_nmi(struct irq_data *d) +{ + gic_irq_configure_nmi(d, true); +} + +static void gic_irq_disable_nmi(struct irq_data *d) +{ + gic_irq_configure_nmi(d, false); +} + static void gic_poke_irq(struct irq_data *d, u32 offset) { void __iomem *base; @@ -432,7 +483,7 @@ static void gic_unmask_irq(struct irq_data *d) gic_poke_irq(d, GICD_ISENABLER); } -static inline bool gic_supports_nmi(void) +static inline bool gic_supports_pseudo_nmis(void) { return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && static_branch_likely(&supports_pseudo_nmis); @@ -535,7 +586,7 @@ static int gic_irq_nmi_setup(struct irq_data *d) { struct irq_desc *desc = irq_to_desc(d->irq); - if (!gic_supports_nmi()) + if (!gic_supports_pseudo_nmis() && !has_v3_3_nmi()) return -EINVAL; if (gic_peek_irq(d, GICD_ISENABLER)) { @@ -563,7 +614,10 @@ static int gic_irq_nmi_setup(struct irq_data *d) desc->handle_irq = handle_fasteoi_nmi; } - gic_irq_set_prio(d, GICD_INT_NMI_PRI); + if (has_v3_3_nmi()) + gic_irq_enable_nmi(d); + else + gic_irq_set_prio(d, GICD_INT_NMI_PRI); return 0; } @@ -572,7 +626,7 @@ static void gic_irq_nmi_teardown(struct irq_data *d) { struct irq_desc *desc = irq_to_desc(d->irq); - if (WARN_ON(!gic_supports_nmi())) + if (WARN_ON(!gic_supports_pseudo_nmis() && !has_v3_3_nmi())) return; if (gic_peek_irq(d, GICD_ISENABLER)) { @@ -598,7 +652,10 @@ static void gic_irq_nmi_teardown(struct irq_data *d) desc->handle_irq = handle_fasteoi_irq; } - gic_irq_set_prio(d, GICD_INT_DEF_PRI); + if (has_v3_3_nmi()) + gic_irq_disable_nmi(d); + else + gic_irq_set_prio(d, GICD_INT_DEF_PRI); } static bool gic_arm64_erratum_2941627_needed(struct irq_data *d) @@ -757,7 +814,7 @@ static inline void gic_complete_ack(u32 irqnr) static bool gic_rpr_is_nmi_prio(void) { - if (!gic_supports_nmi()) + if (!gic_supports_pseudo_nmis()) return false; return unlikely(gic_read_rpr() == 
GICD_INT_RPR_PRI(GICD_INT_NMI_PRI)); @@ -789,7 +846,8 @@ static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs) gic_complete_ack(irqnr); if (generic_handle_domain_nmi(gic_data.domain, irqnr)) { - WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr); + WARN_ONCE(true, "Unexpected %sNMI (irqnr %u)\n", + gic_supports_pseudo_nmis() ? "pseudo-" : "", irqnr); gic_deactivate_unhandled(irqnr); } } @@ -865,9 +923,37 @@ static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs) __gic_handle_nmi(irqnr, regs); } +#ifdef CONFIG_ARM64 +static inline u64 gic_read_nmiar(void) +{ + u64 irqstat; + + irqstat = read_sysreg_s(SYS_ICC_NMIAR1_EL1); + + dsb(sy); + + return irqstat; +} + +static asmlinkage void __exception_irq_entry gic_handle_nmi_irq(struct pt_regs *regs) +{ + u32 irqnr = gic_read_nmiar(); + + __gic_handle_nmi(irqnr, regs); +} + +static inline void gic_setup_nmi_handler(void) +{ + if (has_v3_3_nmi()) + set_handle_nmi_irq(gic_handle_nmi_irq); +} +#else +static inline void gic_setup_nmi_handler(void) { } +#endif + static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) { - if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs))) + if (unlikely(gic_supports_pseudo_nmis() && !interrupts_enabled(regs))) __gic_handle_irq_from_irqsoff(regs); else __gic_handle_irq_from_irqson(regs); @@ -1157,7 +1243,7 @@ static void gic_cpu_sys_reg_init(void) /* Set priority mask register */ if (!gic_prio_masking_enabled()) { write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1); - } else if (gic_supports_nmi()) { + } else if (gic_supports_pseudo_nmis()) { /* * Mismatch configuration with boot CPU, the system is likely * to die as interrupt masking will not work properly on all @@ -1947,25 +2033,8 @@ static const struct gic_quirk gic_quirks[] = { } }; -static void gic_enable_nmi_support(void) +static void gic_enable_pseudo_nmis(void) { - int i; - - if (!gic_prio_masking_enabled()) - return; - - if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) { - 
pr_warn("Skipping NMI enable due to firmware issues\n"); - return; - } - - ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); - if (!ppi_nmi_refs) - return; - - for (i = 0; i < gic_data.ppi_nr; i++) - refcount_set(&ppi_nmi_refs[i], 0); - pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", gic_has_relaxed_pmr_sync() ? "relaxed" : "forced"); @@ -2000,6 +2069,33 @@ static void gic_enable_nmi_support(void) static_branch_enable(&gic_nonsecure_priorities); static_branch_enable(&supports_pseudo_nmis); +} + +static void gic_enable_nmi_support(void) +{ + int i; + + if (!gic_prio_masking_enabled() && !has_v3_3_nmi()) + return; + + if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) { + pr_warn("Skipping NMI enable due to firmware issues\n"); + return; + } + + ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); + if (!ppi_nmi_refs) + return; + + for (i = 0; i < gic_data.ppi_nr; i++) + refcount_set(&ppi_nmi_refs[i], 0); + + /* + * Initialize pseudo-NMIs only if GIC driver cannot take advantage + * of core (FEAT_NMI) and GIC (FEAT_GICv3_NMI) in HW + */ + if (!has_v3_3_nmi()) + gic_enable_pseudo_nmis(); if (static_branch_likely(&supports_deactivate_key)) gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; @@ -2068,6 +2164,7 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base, irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); gic_data.has_rss = !!(typer & GICD_TYPER_RSS); + gic_data.has_nmi = !!(typer & GICD_TYPER_NMI); if (typer & GICD_TYPER_MBIS) { err = mbi_init(handle, gic_data.domain); @@ -2077,6 +2174,8 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base, set_handle_irq(gic_handle_irq); + gic_setup_nmi_handler(); + gic_update_rdist_properties(); gic_dist_init(); diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 728691365464..3306456c135f 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -30,6 
+30,7 @@ #define GICD_ICFGR 0x0C00 #define GICD_IGRPMODR 0x0D00 #define GICD_NSACR 0x0E00 +#define GICD_INMIR 0x0F80 #define GICD_IGROUPRnE 0x1000 #define GICD_ISENABLERnE 0x1200 #define GICD_ICENABLERnE 0x1400 @@ -39,6 +40,7 @@ #define GICD_ICACTIVERnE 0x1C00 #define GICD_IPRIORITYRnE 0x2000 #define GICD_ICFGRnE 0x3000 +#define GICD_INMIRnE 0x3B00 #define GICD_IROUTER 0x6000 #define GICD_IROUTERnE 0x8000 #define GICD_IDREGS 0xFFD0 @@ -83,6 +85,7 @@ #define GICD_TYPER_LPIS (1U << 17) #define GICD_TYPER_MBIS (1U << 16) #define GICD_TYPER_ESPI (1U << 8) +#define GICD_TYPER_NMI (1U << 9) #define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) #define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) @@ -238,6 +241,7 @@ #define GICR_ICFGR0 GICD_ICFGR #define GICR_IGRPMODR0 GICD_IGRPMODR #define GICR_NSACR GICD_NSACR +#define GICR_INMIR0 GICD_INMIR #define GICR_TYPER_PLPIS (1U << 0) #define GICR_TYPER_VLPIS (1U << 1) -- Gitee From 79714cf253c7e5b458a06f5d85775031c90f451d Mon Sep 17 00:00:00 2001 From: Jie Liu Date: Mon, 19 Feb 2024 09:34:33 +0800 Subject: [PATCH 14/19] config: enable CONFIG_ARM64_NMI and CONFIG_HARDLOCKUP_DETECTOR_PERF for arm64 commit c0885532df81dcdbda789adb148938a89b59a575 openEuler Set CONFIG_ARM64_NMI=y and CONFIG_HARDLOCKUP_DETECTOR_PERF=y in arm64 tencentconfig. 
Signed-off-by: Jie Liu Signed-off-by: huwentao --- arch/arm64/configs/tencent.config | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index 9fe80baf90a7..e7c12f49308c 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -1752,3 +1752,5 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m CONFIG_MEMTEST=y +CONFIG_ARM64_NMI=y +CONFIG_HARDLOCKUP_DETECTOR_PERF=y -- Gitee From b087eeb7a685804fd4d049140524f7e67d03cf5e Mon Sep 17 00:00:00 2001 From: Jinjie Ruan Date: Tue, 30 Jan 2024 12:38:58 +0000 Subject: [PATCH 15/19] irqchip/gic-v3: Fix hard LOCKUP caused by NMI being masked commit eefea61569216c047964a2ee1e370b5dc4fb29da openEuler When handling an exception, both daif and allint will be set by hardware. In __gic_handle_irq_from_irqson(), it only considers the Pseudo-NMI by clearing daif.I and daif.F and setting PMR to GIC_PRIO_IRQOFF to enable Pseudo-NMI and mask IRQ. If the hardware NMI is enabled, it should also clear allint to enable the hardware NMI and mask IRQ before handling an IRQ; otherwise allint will be set in softirq context and local_irq_enable() cannot enable IRQs, and the watchdog NMI cannot trigger either, which causes the hard LOCKUP below. And in gic_handle_irq(), it only considers the Pseudo-NMI when an exception has been taken from a context with IRQs disabled. So add a gic_supports_nmi() helper which considers both Pseudo-NMI and hardware NMI. And define the PSR_ALLINT_BIT bit and update interrupts_enabled() as well as fast_interrupts_enabled() to consider the ALLINT bit. 
watchdog: Watchdog detected hard LOCKUP on cpu 1 Modules linked in: Sending NMI from CPU 0 to CPUs 1: Kernel panic - not syncing: Hard LOCKUP CPU: 0 PID: 0 Comm: swapper/0 Not tainted 6.6.0-gec40ec8c5e9f #295 Hardware name: linux,dummy-virt (DT) Call trace: dump_backtrace+0x98/0xf8 show_stack+0x20/0x38 dump_stack_lvl+0x48/0x60 dump_stack+0x18/0x28 panic+0x384/0x3e0 nmi_panic+0x94/0xa0 watchdog_hardlockup_check+0x1bc/0x1c8 watchdog_buddy_check_hardlockup+0x68/0x80 watchdog_timer_fn+0x88/0x2f8 __hrtimer_run_queues+0x17c/0x368 hrtimer_run_queues+0xd4/0x158 update_process_times+0x3c/0xc0 tick_periodic+0x44/0xc8 tick_handle_periodic+0x3c/0xb0 arch_timer_handler_virt+0x3c/0x58 handle_percpu_devid_irq+0x90/0x248 generic_handle_domain_irq+0x34/0x58 gic_handle_irq+0x58/0x110 call_on_irq_stack+0x24/0x58 do_interrupt_handler+0x88/0x98 el1_interrupt+0x40/0xc0 el1h_64_irq_handler+0x24/0x30 el1h_64_irq+0x64/0x68 default_idle_call+0x5c/0x160 do_idle+0x220/0x288 cpu_startup_entry+0x40/0x50 rest_init+0xf0/0xf8 arch_call_rest_init+0x18/0x20 start_kernel+0x520/0x668 __primary_switched+0xbc/0xd0 Fixes: dd8b74f04223 ("arm64/nmi: Manage masking for superpriority interrupts along with DAIF") Signed-off-by: Jinjie Ruan Signed-off-by: Jie Liu Signed-off-by: huwentao --- arch/arm64/include/asm/ptrace.h | 5 +++-- arch/arm64/include/uapi/asm/ptrace.h | 1 + drivers/irqchip/irq-gic-v3.c | 5 +++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 47ec58031f11..81bb2cb17d85 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -240,10 +240,11 @@ static inline void forget_syscall(struct pt_regs *regs) true) #define interrupts_enabled(regs) \ - (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs)) + (!((regs)->pstate & PSR_ALLINT_BIT) && !((regs)->pstate & PSR_I_BIT) && \ + irqs_priority_unmasked(regs)) #define fast_interrupts_enabled(regs) \ - (!((regs)->pstate & 
PSR_F_BIT)) + (!((regs)->pstate & PSR_ALLINT_BIT) && !(regs)->pstate & PSR_F_BIT) static inline unsigned long user_stack_pointer(struct pt_regs *regs) { diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 7fa2f7036aa7..8a125a1986be 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -48,6 +48,7 @@ #define PSR_D_BIT 0x00000200 #define PSR_BTYPE_MASK 0x00000c00 #define PSR_SSBS_BIT 0x00001000 +#define PSR_ALLINT_BIT 0x00002000 #define PSR_PAN_BIT 0x00400000 #define PSR_UAO_BIT 0x00800000 #define PSR_DIT_BIT 0x01000000 diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 442f474ec656..1a92f603f648 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -151,6 +151,7 @@ enum gic_intid_range { }; #ifdef CONFIG_ARM64 +#include #include static inline bool has_v3_3_nmi(void) @@ -881,6 +882,10 @@ static void __gic_handle_irq_from_irqson(struct pt_regs *regs) if (gic_prio_masking_enabled()) { gic_pmr_mask_irqs(); gic_arch_enable_irqs(); + } else if (has_v3_3_nmi()) { +#ifdef CONFIG_ARM64_NMI + _allint_clear(); +#endif } if (!is_nmi) -- Gitee From d2bb25985c59ad1a306221c0416676f462e44e9c Mon Sep 17 00:00:00 2001 From: Jinjie Ruan Date: Fri, 29 Mar 2024 16:44:23 +0800 Subject: [PATCH 16/19] arm64: Enable hardware NMI for perf events NMI commit 5cc820c154b7894835919d6a64720f198c907714 openEuler Like pseudo NMI, also select HAVE_PERF_EVENTS_NMI for hardware NMI, and update the comment for arch_perf_nmi_is_available(). 
Signed-off-by: Jinjie Ruan Signed-off-by: huwentao --- arch/arm64/Kconfig | 2 +- arch/arm64/kernel/watchdog_hld.c | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 4ca7d468e962..27db0bcceb29 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -222,7 +222,7 @@ config ARM64 select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI select HAVE_PERF_EVENTS - select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI + select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI || ARM64_NMI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_PREEMPT_DYNAMIC_KEY diff --git a/arch/arm64/kernel/watchdog_hld.c b/arch/arm64/kernel/watchdog_hld.c index dcd25322127c..817f0b7f6f92 100644 --- a/arch/arm64/kernel/watchdog_hld.c +++ b/arch/arm64/kernel/watchdog_hld.c @@ -28,9 +28,10 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh) bool __init arch_perf_nmi_is_available(void) { /* - * hardlockup_detector_perf_init() will success even if Pseudo-NMI turns off, - * however, the pmu interrupts will act like a normal interrupt instead of - * NMI and the hardlockup detector would be broken. + * hardlockup_detector_perf_init() will succeed even if Pseudo-NMI or + * Hardware NMI turns off. However, the pmu interrupts will act like + * a normal interrupt instead of NMI and the hardlockup detector would + * be broken. */ return arm_pmu_irq_is_nmi(); } -- Gitee From ae7e66fdfbce7e0c3a084b91579a2dec6b2339c5 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Thu, 16 May 2024 17:20:16 +0800 Subject: [PATCH 17/19] irqchip/gic-v3: Fix one race condition due to NMI withdraw commit 626602294dca57b664e95ba72ee32435179af627 openEuler The introduction of FEAT_NMI/FEAT_GICv3_NMI can cause a race problem where we may handle a normal interrupt in an interrupt-disabled context due to the withdrawal of an NMI interrupt. 
The flow will be like below: [interrupt disabled] <- normal interrupt pending, for example timer interrupt <- NMI occurs, ISR_EL1.nmi = 1 do_el1_interrupt() <- NMI withdraw, ISR_EL1.nmi = 0 ISR_EL1.nmi = 0, not an NMI interrupt gic_handle_irq() __gic_handle_irq_from_irqson() irqnr = gic_read_iar() <- Oops, ack and handle a normal interrupt in interrupt-disabled context! Fix this by checking the interrupt status in __gic_handle_irq_from_irqson() and ignoring the interrupt if we're in an interrupt-disabled context. Fixes: 2731a6c26d4e ("irqchip/gic-v3: Implement FEAT_GICv3_NMI support") Signed-off-by: Yicong Yang Signed-off-by: Jie Liu Signed-off-by: huwentao --- drivers/irqchip/irq-gic-v3.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1a92f603f648..101b899ac5ea 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -869,6 +869,28 @@ static void __gic_handle_irq_from_irqson(struct pt_regs *regs) bool is_nmi; u32 irqnr; + /* + * We should enter here with interrupts disabled, otherwise we may hit + * a race here with FEAT_NMI/FEAT_GICv3_NMI: + * + * [interrupt disabled] + * <- normal interrupt pending, for example timer interrupt + * <- NMI occurs, ISR_EL1.nmi = 1 + * do_el1_interrupt() + * <- NMI withdraw, ISR_EL1.nmi = 0 + * ISR_EL1.nmi = 0, not an NMI interrupt + * gic_handle_irq() + * __gic_handle_irq_from_irqson() + * irqnr = gic_read_iar() <- Oops, ack and handle a normal interrupt + * in interrupt-disabled context! + * + * So if we hit this case here, just return from the interrupt context. + * Since the interrupt is still pending, we can handle it once + * interrupts are re-enabled and it will not be missed. 
+ */ + if (!interrupts_enabled(regs)) + return; + irqnr = gic_read_iar(); is_nmi = gic_rpr_is_nmi_prio(); -- Gitee From f7a7f978ef69988a800c1036e133910ebe8232a3 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:30 +0800 Subject: [PATCH 18/19] tencent.config: Enable SDEI Watchdog commit 67d9ff039accc03ac72ca03124a306141f5aa0c4 openEuler Enable SDEI Watchdog for ARM64. Signed-off-by: Xiongfeng Wang Signed-off-by: zhangguangzhi <908293048@qq.com> --- arch/arm64/configs/tencent.config | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index e7c12f49308c..052ee68804e6 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -1753,4 +1753,5 @@ CONFIG_TEST_UDELAY=m CONFIG_TEST_STATIC_KEYS=m CONFIG_MEMTEST=y CONFIG_ARM64_NMI=y -CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_HARDLOCKUP_DETECTOR_PERF is not set +CONFIG_SDEI_WATCHDOG=y -- Gitee From 436eb96bcf08fb60a76ee092bf8a54346d07233c Mon Sep 17 00:00:00 2001 From: Jinjie Ruan Date: Thu, 28 Mar 2024 20:01:32 +0800 Subject: [PATCH 19/19] irqchip/gic-v3: Fix a system stall when using pseudo NMI with CONFIG_ARM64_NMI closed commit f7cea6febbbc02eebfd80209c6b44883b3dd6a63 openEuler A system stall occurs when using pseudo NMI with CONFIG_ARM64_NMI closed. If the hardware supports FEAT_NMI, the ALLINT bit in pstate may be set or cleared on an exception trap whether the software enables it or not, so it is not safe to use it to check interrupts_enabled() or fast_interrupts_enabled() when FEAT_NMI is not enabled in the kernel, so recover it. After applying this patch, the system stall no longer happens on hardware with the FEAT_NMI feature. 
Fixes: b087eeb7a685 ("irqchip/gic-v3: Fix hard LOCKUP caused by NMI being masked") Signed-off-by: Jinjie Ruan Signed-off-by: zhaolichang <943677312@qq.com> --- arch/arm64/include/asm/ptrace.h | 5 ++--- arch/arm64/include/uapi/asm/ptrace.h | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 81bb2cb17d85..a90ae87ebec5 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -240,11 +240,10 @@ static inline void forget_syscall(struct pt_regs *regs) true) #define interrupts_enabled(regs) \ - (!((regs)->pstate & PSR_ALLINT_BIT) && !((regs)->pstate & PSR_I_BIT) && \ - irqs_priority_unmasked(regs)) + (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs)) #define fast_interrupts_enabled(regs) \ - (!((regs)->pstate & PSR_ALLINT_BIT) && !(regs)->pstate & PSR_F_BIT) + (!(regs)->pstate & PSR_F_BIT) static inline unsigned long user_stack_pointer(struct pt_regs *regs) { diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 8a125a1986be..7fa2f7036aa7 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -48,7 +48,6 @@ #define PSR_D_BIT 0x00000200 #define PSR_BTYPE_MASK 0x00000c00 #define PSR_SSBS_BIT 0x00001000 -#define PSR_ALLINT_BIT 0x00002000 #define PSR_PAN_BIT 0x00400000 #define PSR_UAO_BIT 0x00800000 #define PSR_DIT_BIT 0x01000000 -- Gitee