From 47e61f0ca6d7cc2cdaa01fbf83cb7b4269515a0c Mon Sep 17 00:00:00 2001
From: zhangjunmar <12733587+zhangjunmar@user.noreply.gitee.com>
Date: Tue, 13 Jan 2026 10:15:57 +0800
Subject: [PATCH 1/2] Hygon: support flushing all branch types in the Branch
 Target Buffer with the IBPB command, not only indirect branches

ANBZ:#29445

On HYGON C86-4G and later CPUs, the IBPB command flushes only indirect
branch types in the Branch Target Buffer, but in some scenarios all branch
types must be flushed with the IBPB command to mitigate SRSO (Inception).
On AMD, the full mitigation is achieved by a microcode update, which is
indicated by the has_microcode variable. HYGON instead provides this
capability by setting bit 55 of MSR_ZEN4_BP_CFG, without a microcode
update.

Add a new kernel command-line parameter to set this bit, and add a new
function, ibpb_can_flush_all(), as the HYGON replacement for the AMD state
indicated by has_microcode. Users can choose whether IBPB flushes all
branches via this command-line parameter, weighing the attack risk against
the performance cost.

Signed-off-by: zhangjunmar <12733587+zhangjunmar@user.noreply.gitee.com>
---
 Documentation/admin-guide/hw-vuln/srso.rst | 19 ++++
 arch/x86/kernel/cpu/bugs.c                 | 73 ++++++++++++++++++++++
 2 files changed, 92 insertions(+)

diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
index f79cb11b080f..b93fc73aac15 100644
--- a/Documentation/admin-guide/hw-vuln/srso.rst
+++ b/Documentation/admin-guide/hw-vuln/srso.rst
@@ -30,12 +30,20 @@ Affected processors
 AMD Zen, generations 1-4. That is, all families 0x17 and 0x19. Older
 processors have not been investigated.
 
+HYGON, generations 1-4.
+
 System information and options
 ------------------------------
 
 First of all, it is required that the latest microcode be loaded for
 mitigations to be effective.
 
+On HYGON generation 1-3 CPUs, IBPB flushes all types of branches by default,
+so neither a microcode update nor a kernel parameter is needed.
+On HYGON generation 4 CPUs, all branches can be flushed by setting
+ibpb_brtype=ibpb-all (which is also the default) rather than by loading
+microcode. See the description of the ibpb_brtype kernel parameter below.
+
 The sysfs file showing SRSO mitigation status is:
 
   /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow
@@ -79,6 +87,17 @@ The possible values in this file are:
 
    (spec_rstack_overflow=ibpb-vmexit)
 
+To mitigate attacks against user-space tasks, IBPB must be configured to
+flush all types of branches via the ibpb_brtype kernel parameter:
+
+ibpb_brtype=	[X86, HYGON only]
+		IBPB action control flag.
+		Format: { ibpb-all | ibpb-ind }
+		ibpb-all -- IBPB flushes all types of branches. This is
+			    the default when the command line does not
+			    specify ibpb_brtype.
+		ibpb-ind -- IBPB flushes only indirect branches.
+
 In order to exploit vulnerability, an attacker needs to:
 
  - gain local access on the machine
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 51dfedc68d6a..fd629496fc5e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -2013,6 +2013,45 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 	}
 }
 
+/**
+ * enum ibpb_brtype_cmd - IBPB action control flag
+ * @IBPB_FLUSH_IND: IBPB flushes only indirect branches in the Branch Target Buffer
+ * @IBPB_FLUSH_ALL: IBPB flushes all types of branches in the Branch Target Buffer
+ */
+enum ibpb_brtype_cmd {
+	IBPB_FLUSH_IND,
+	IBPB_FLUSH_ALL,
+};
+static enum ibpb_brtype_cmd ibpb_brtype __ro_after_init = IBPB_FLUSH_ALL;
+/*
+ * The kernel parameter ibpb_brtype controls whether IBPB flushes all
+ * branches or only indirect branches:
+ * ibpb_brtype=	[X86, HYGON only]
+ *		IBPB action control flag
+ *		Format: { ibpb-all | ibpb-ind }
+ *		ibpb-all -- IBPB flushes all types of branches, this is the default value.
+ *		ibpb-ind -- IBPB flushes only indirect branches.
+ */
+static int __init ibpb_brtype_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "ibpb-all")) {
+		ibpb_brtype = IBPB_FLUSH_ALL;
+		pr_info("IBPB flushes all branches.\n");
+	} else if (!strcmp(str, "ibpb-ind")) {
+		ibpb_brtype = IBPB_FLUSH_IND;
+		pr_info("IBPB flushes only indirect branches.\n");
+	} else
+		pr_err("Ignoring unknown ibpb branch type option (%s).\n", str);
+
+	return 0;
+}
+early_param("ibpb_brtype", ibpb_brtype_cmdline);
+
+#define IBPB_FLUSH_ALL_BIT	55
+
 void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
@@ -2020,6 +2059,13 @@ void x86_spec_ctrl_setup_ap(void)
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();
+
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) &&
+	    (boot_cpu_data.x86 == 0x18)) {
+		if ((boot_cpu_data.x86_model > 0x3) &&
+		    (ibpb_brtype == IBPB_FLUSH_ALL))
+			msr_set_bit(MSR_ZEN4_BP_CFG, IBPB_FLUSH_ALL_BIT);
+	}
 }
 
 bool itlb_multihit_kvm_mitigation;
@@ -2201,6 +2247,29 @@ early_param("spec_rstack_overflow", srso_parse_cmdline);
 
 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
 
+/*
+ * ibpb_can_flush_all() - apply the IBPB flush type selected on the command
+ * line and check whether IBPB can flush all branches.
+ * Return: true when IBPB can flush all types of branches,
+ * false when IBPB flushes only indirect branches.
+ */
+bool ibpb_can_flush_all(void)
+{
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) &&
+	    (boot_cpu_data.x86 == 0x18)) {
+		if (boot_cpu_data.x86_model <= 0x3) {
+			return true;
+		} else if (ibpb_brtype == IBPB_FLUSH_ALL) {
+			msr_set_bit(MSR_ZEN4_BP_CFG, IBPB_FLUSH_ALL_BIT);
+			return true;
+		}
+		return false;
+	}
+
+	pr_err("WARNING: ibpb_can_flush_all() is only meant for HYGON CPUs.\n");
+	return false;
+}
+
 static void __init srso_select_mitigation(void)
 {
 	bool has_microcode;
@@ -2213,6 +2282,10 @@ static void __init srso_select_mitigation(void)
 	 * for guests to verify whether IBPB is a viable mitigation.
 	 */
 	has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		has_microcode = ibpb_can_flush_all();
+
 	if (!has_microcode) {
 		pr_warn("IBPB-extending microcode not applied!\n");
 		pr_warn(SRSO_NOTICE);
-- 
Gitee

From c52bcb6ea626ef0f62dc4afb5382de5930fded6c Mon Sep 17 00:00:00 2001
From: zhangjunmar <12733587+zhangjunmar@user.noreply.gitee.com>
Date: Tue, 13 Jan 2026 14:22:25 +0800
Subject: [PATCH 2/2] Hygon: support hardware-based mitigation for Retbleed
 and SRSO via Hygon IBRS to avoid the performance cost of software-based
 mitigation

ANBZ:#29446

Hygon IBRS differs from AMD Automatic IBRS: it mitigates vulnerabilities
based on the predicted branch type rather than the actual branch type, so
it can mitigate both Retbleed and SRSO by preventing predicted branch
types from being used in the kernel. Enable Hygon IBRS when the
corresponding x86 feature flag reports it as available, and prefer it as
the default mitigation for Retbleed and SRSO, since the software-based
mitigations degrade performance.

Also set bit 56 of MSR_ZEN4_BP_CFG to flush the RAS (Return Address Stack)
on privilege level changes from low to high. This RAS flush replaces RSB
filling when IBRS is enabled.

Signed-off-by: zhangjunmar <12733587+zhangjunmar@user.noreply.gitee.com>
---
 arch/x86/kernel/cpu/bugs.c   | 63 ++++++++++++++++++++++++++++++++++--
 arch/x86/kernel/cpu/common.c | 10 ++++--
 arch/x86/kernel/cpu/hygon.c  | 18 +++++++++++
 3 files changed, 86 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index fd629496fc5e..aad82d4539b4 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -878,6 +878,19 @@ static void __init retbleed_select_mitigation(void)
 		break;
 	}
 
+	/* Enhanced IBRS (eIBRS) is preferred on HYGON processors. */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		switch (spectre_v2_enabled) {
+		case SPECTRE_V2_EIBRS:
+		case SPECTRE_V2_EIBRS_RETPOLINE:
+		case SPECTRE_V2_EIBRS_LFENCE:
+			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+			break;
+		default:
+			break;
+		}
+	}
+
 	switch (retbleed_mitigation) {
 	case RETBLEED_MITIGATION_UNRET:
 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
@@ -1362,6 +1375,9 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
 		return;
 
 	case SPECTRE_V2_EIBRS_RETPOLINE:
+		/* Hygon Enhanced IBRS flushes RAS upon privilege level changes from low to high. */
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+			return;
 	case SPECTRE_V2_RETPOLINE:
 	case SPECTRE_V2_LFENCE:
 	case SPECTRE_V2_IBRS:
@@ -1526,7 +1542,21 @@ static void __init spectre_v2_select_mitigation(void)
 	 * FIXME: Is this pointless for retbleed-affected AMD?
 	 */
 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
-	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+
+	/* Hygon Enhanced IBRS flushes RAS upon privilege level changes from low to high. */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		switch (spectre_v2_enabled) {
+		case SPECTRE_V2_EIBRS:
+		case SPECTRE_V2_EIBRS_RETPOLINE:
+		case SPECTRE_V2_EIBRS_LFENCE:
+			setup_clear_cpu_cap(X86_FEATURE_RSB_CTXSW);
+			break;
+		default:
+			pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+			break;
+		}
+	} else
+		pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
 	spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
 
@@ -2202,6 +2232,7 @@ enum srso_mitigation {
 	SRSO_MITIGATION_SAFE_RET,
 	SRSO_MITIGATION_IBPB,
 	SRSO_MITIGATION_IBPB_ON_VMEXIT,
+	SRSO_MITIGATION_EIBRS,
 };
 
 enum srso_mitigation_cmd {
@@ -2217,7 +2248,8 @@ static const char * const srso_strings[] = {
 	[SRSO_MITIGATION_MICROCODE]		= "Mitigation: microcode",
 	[SRSO_MITIGATION_SAFE_RET]		= "Mitigation: safe RET",
 	[SRSO_MITIGATION_IBPB]			= "Mitigation: IBPB",
-	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only"
+	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only",
+	[SRSO_MITIGATION_EIBRS]			= "Mitigation: Enhanced IBRS",
 };
 
 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
@@ -2314,6 +2346,21 @@ static void __init srso_select_mitigation(void)
 		}
 	}
 
+	/* Enhanced IBRS (eIBRS) is preferred on HYGON processors. */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		switch (spectre_v2_enabled) {
+		case SPECTRE_V2_EIBRS:
+		case SPECTRE_V2_EIBRS_RETPOLINE:
+		case SPECTRE_V2_EIBRS_LFENCE:
+			srso_mitigation = SRSO_MITIGATION_EIBRS;
+			pr_info("%s%s\n", srso_strings[srso_mitigation],
+				(has_microcode ? "" : ", no microcode"));
+			goto pred_cmd;
+		default:
+			break;
+		}
+	}
+
 	switch (srso_cmd) {
 	case SRSO_CMD_OFF:
 		goto pred_cmd;
@@ -2569,11 +2616,21 @@ static ssize_t retbleed_show_state(char *buf)
 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
 
-		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+			return sysfs_emit(buf, "%s; SMT %s\n",
+				retbleed_strings[retbleed_mitigation],
 				  !sched_smt_active() ? "disabled" :
+				  spectre_v2_in_eibrs_mode(spectre_v2_enabled) ||
 				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
 				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
 				  "enabled with STIBP protection" : "vulnerable");
+		}
+
+		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
+				  !sched_smt_active() ? "disabled" :
+				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
+				  "enabled with STIBP protection" : "vulnerable");
 	}
 
 	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8deb567c92a6..5934f8715e1f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -954,7 +954,10 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
 	 * Intel CPUs, for finer-grained selection of what's available.
 	 */
 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
-		set_cpu_cap(c, X86_FEATURE_IBRS);
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+			set_cpu_cap(c, X86_FEATURE_IBRS_ENHANCED);
+		else
+			set_cpu_cap(c, X86_FEATURE_IBRS);
 		set_cpu_cap(c, X86_FEATURE_IBPB);
 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 	}
@@ -967,7 +970,10 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_SSBD);
 
 	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
-		set_cpu_cap(c, X86_FEATURE_IBRS);
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+			set_cpu_cap(c, X86_FEATURE_IBRS_ENHANCED);
+		else
+			set_cpu_cap(c, X86_FEATURE_IBRS);
 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 	}
 
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index f57257465d0c..0959269727c2 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -24,6 +24,7 @@
 
 #include "cpu.h"
 
+#define IBRS_FLUSH_RAS_BIT	56
 #define APICID_SOCKET_ID_BIT	6
 
 /*
@@ -330,12 +331,29 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 		setup_clear_cpu_cap(X86_FEATURE_CSV3);
 }
 
+/*
+ * cpu_vul_mitigation() - basic configuration for CPU vulnerability mitigation
+ */
+static void cpu_vul_mitigation(void)
+{
+	/*
+	 * Automatically flush the RAS upon privilege level changes from low to high.
+	 * This is used as the RSB mitigation in place of RSB filling.
+	 */
+	if ((boot_cpu_data.x86 == 0x18) &&
+	    (boot_cpu_data.x86_model > 0x3)) {
+		msr_set_bit(MSR_ZEN4_BP_CFG, IBRS_FLUSH_RAS_BIT);
+	}
+}
+
 static void early_init_hygon(struct cpuinfo_x86 *c)
 {
 	u32 dummy;
 
 	early_init_hygon_mc(c);
 
+	cpu_vul_mitigation();
+
 	set_cpu_cap(c, X86_FEATURE_K8);
 
 	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
-- 
Gitee
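Both patches program configuration bits in MSR_ZEN4_BP_CFG: bit 55 makes IBPB
flush all branch types, and bit 56 makes the RAS be flushed on privilege level
changes from low to high. The small user-space sketch below is not part of the
patches; it reads that MSR on CPU 0 through the msr driver and reports whether
the two bits are set, which can help verify the resulting configuration on a
Hygon machine. The MSR address 0xc001102e is an assumption taken from the
kernel's MSR_ZEN4_BP_CFG definition in msr-index.h, and the file name
bp_cfg_check.c is arbitrary.

/*
 * bp_cfg_check.c - illustrative sketch, not part of the patches above.
 * Reads MSR_ZEN4_BP_CFG on CPU 0 via the msr driver and reports whether
 * the IBPB-flush-all bit (55) and the IBRS RAS-flush bit (56) are set.
 *
 * Assumption: MSR address 0xc001102e (MSR_ZEN4_BP_CFG in msr-index.h).
 * Build: gcc -O2 -o bp_cfg_check bp_cfg_check.c
 * Run:   modprobe msr && sudo ./bp_cfg_check
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_ZEN4_BP_CFG		0xc001102e	/* assumed address, see above */
#define IBPB_FLUSH_ALL_BIT	55		/* from patch 1 */
#define IBRS_FLUSH_RAS_BIT	56		/* from patch 2 */

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr (is the msr module loaded?)");
		return 1;
	}

	/* The msr driver interprets the file offset as the MSR address. */
	if (pread(fd, &val, sizeof(val), MSR_ZEN4_BP_CFG) != sizeof(val)) {
		perror("pread MSR_ZEN4_BP_CFG");
		close(fd);
		return 1;
	}
	close(fd);

	printf("MSR_ZEN4_BP_CFG = 0x%016" PRIx64 "\n", val);
	printf("bit %d (IBPB flushes all branch types): %s\n",
	       IBPB_FLUSH_ALL_BIT,
	       (val >> IBPB_FLUSH_ALL_BIT) & 1 ? "set" : "clear");
	printf("bit %d (flush RAS on privilege level change): %s\n",
	       IBRS_FLUSH_RAS_BIT,
	       (val >> IBRS_FLUSH_RAS_BIT) & 1 ? "set" : "clear");
	return 0;
}

On a HYGON generation 4 (family 0x18, model > 0x3) system booted with the
defaults from these patches, both bits are expected to read as set; with
ibpb_brtype=ibpb-ind on the command line, bit 55 should read as clear.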