diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
index f79cb11b080f67733c4a4d08699a991f95aeb015..b93fc73aac151b451d5264a58c37d2bdb5fe458e 100644
--- a/Documentation/admin-guide/hw-vuln/srso.rst
+++ b/Documentation/admin-guide/hw-vuln/srso.rst
@@ -30,12 +30,20 @@ Affected processors
 AMD Zen, generations 1-4.  That is, all families 0x17 and 0x19.  Older
 processors have not been investigated.
+HYGON processors, generations 1-4.  That is, family 0x18.
+
 
 System information and options
 ------------------------------
 
 First of all, it is required that the latest microcode be loaded for
 mitigations to be effective.
 
+For HYGON generation 1-3 CPUs, IBPB flushes all types of branches by default,
+so no microcode update or kernel parameter is needed.
+For HYGON generation 4 CPUs, IBPB flushes all branches when the ibpb_brtype
+kernel parameter is set to, or left at its default of, ibpb-all; no microcode
+update is needed either.  See the description of the ibpb_brtype parameter below.
+
 The sysfs file showing SRSO mitigation status is:
 
   /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow
 
@@ -79,6 +87,17 @@ The possible values in this file are:
      (spec_rstack_overflow=ibpb-vmexit)
 
+In order to mitigate attacks against user-space tasks, IBPB must flush
+all types of branches; on HYGON CPUs this is controlled by the
+ibpb_brtype kernel parameter, described below:
+ibpb_brtype=	[X86, HYGON only]
+		IBPB action control flag
+		Format: { ibpb-all | ibpb-ind }
+		ibpb-all -- IBPB flushes all types of branches.  This is
+			    the default if the command line does not
+			    specify ibpb_brtype.
+		ibpb-ind -- IBPB flushes only indirect branches.
+
 In order to exploit vulnerability, an attacker needs to:
 
  - gain local access on the machine
 
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 51dfedc68d6a5c38f3fa3a8d37335f53593f720e..aad82d4539b4f19795d9c867c575262d86226ec4 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -878,6 +878,19 @@ static void __init retbleed_select_mitigation(void)
 		break;
 	}
 
+	/* Enhanced IBRS (eIBRS) is preferred on HYGON processors. */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		switch (spectre_v2_enabled) {
+		case SPECTRE_V2_EIBRS:
+		case SPECTRE_V2_EIBRS_RETPOLINE:
+		case SPECTRE_V2_EIBRS_LFENCE:
+			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+			break;
+		default:
+			break;
+		}
+	}
+
 	switch (retbleed_mitigation) {
 	case RETBLEED_MITIGATION_UNRET:
 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
@@ -1362,6 +1375,9 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
 		return;
 
 	case SPECTRE_V2_EIBRS_RETPOLINE:
+		/* Hygon Enhanced IBRS flushes the RAS on privilege level changes from low to high. */
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+			return;
 	case SPECTRE_V2_RETPOLINE:
 	case SPECTRE_V2_LFENCE:
 	case SPECTRE_V2_IBRS:
@@ -1526,7 +1542,21 @@ static void __init spectre_v2_select_mitigation(void)
 	 * FIXME: Is this pointless for retbleed-affected AMD?
 	 */
 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
-	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+
+	/* Hygon Enhanced IBRS flushes the RAS on privilege level changes from low to high. */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		switch (spectre_v2_enabled) {
+		case SPECTRE_V2_EIBRS:
+		case SPECTRE_V2_EIBRS_RETPOLINE:
+		case SPECTRE_V2_EIBRS_LFENCE:
+			setup_clear_cpu_cap(X86_FEATURE_RSB_CTXSW);
+			break;
+		default:
+			pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+			break;
+		}
+	} else
+		pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
 	spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
 
@@ -2013,6 +2043,45 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 	}
 }
 
+/**
+ * enum ibpb_brtype_cmd - IBPB action control flag
+ * @IBPB_FLUSH_IND: IBPB flushes only indirect branches in the branch target buffer
+ * @IBPB_FLUSH_ALL: IBPB flushes all types of branches in the branch target buffer
+ */
+enum ibpb_brtype_cmd {
+	IBPB_FLUSH_IND,
+	IBPB_FLUSH_ALL,
+};
+static enum ibpb_brtype_cmd ibpb_brtype __ro_after_init = IBPB_FLUSH_ALL;
+/*
+ * The ibpb_brtype kernel parameter controls whether IBPB flushes all
+ * branches or only indirect branches:
+ * ibpb_brtype=	[X86, HYGON only]
+ *	IBPB action control flag
+ *	Format: { ibpb-all | ibpb-ind }
+ *	ibpb-all -- IBPB flushes all types of branches, this is the default value.
+ *	ibpb-ind -- IBPB flushes only indirect branches.
+ */
+static int __init ibpb_brtype_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "ibpb-all")) {
+		ibpb_brtype = IBPB_FLUSH_ALL;
+		pr_info("IBPB flushes all branches.\n");
+	} else if (!strcmp(str, "ibpb-ind")) {
+		ibpb_brtype = IBPB_FLUSH_IND;
+		pr_info("IBPB flushes only indirect branches.\n");
+	} else
+		pr_err("Ignoring unknown ibpb branch type option (%s).\n", str);
+
+	return 0;
+}
+early_param("ibpb_brtype", ibpb_brtype_cmdline);
+
+#define IBPB_FLUSH_ALL_BIT	55
+
 void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
@@ -2020,6 +2089,13 @@ void x86_spec_ctrl_setup_ap(void)
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();
+
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) &&
+	    (boot_cpu_data.x86 == 0x18)) {
+		if ((boot_cpu_data.x86_model > 0x3) &&
+		    (ibpb_brtype == IBPB_FLUSH_ALL))
+			msr_set_bit(MSR_ZEN4_BP_CFG, IBPB_FLUSH_ALL_BIT);
+	}
 }
 
 bool itlb_multihit_kvm_mitigation;
@@ -2156,6 +2232,7 @@ enum srso_mitigation {
 	SRSO_MITIGATION_SAFE_RET,
 	SRSO_MITIGATION_IBPB,
 	SRSO_MITIGATION_IBPB_ON_VMEXIT,
+	SRSO_MITIGATION_EIBRS,
 };
 
 enum srso_mitigation_cmd {
@@ -2171,7 +2248,8 @@ static const char * const srso_strings[] = {
 	[SRSO_MITIGATION_MICROCODE]		= "Mitigation: microcode",
 	[SRSO_MITIGATION_SAFE_RET]		= "Mitigation: safe RET",
 	[SRSO_MITIGATION_IBPB]			= "Mitigation: IBPB",
-	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only"
+	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only",
+	[SRSO_MITIGATION_EIBRS]			= "Mitigation: Enhanced IBRS",
 };
 
 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
@@ -2201,6 +2279,29 @@ early_param("spec_rstack_overflow", srso_parse_cmdline);
 
 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
 
+/*
+ * ibpb_can_flush_all() - apply the ibpb_brtype command line choice and
+ * check whether IBPB can flush all types of branches
+ * @return: true when IBPB flushes all types of branches,
+ *	false when IBPB flushes only indirect branches.
+ */
+static bool ibpb_can_flush_all(void)
+{
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) &&
+	    (boot_cpu_data.x86 == 0x18)) {
+		if (boot_cpu_data.x86_model <= 0x3) {
+			return true;
+		} else if (ibpb_brtype == IBPB_FLUSH_ALL) {
+			msr_set_bit(MSR_ZEN4_BP_CFG, IBPB_FLUSH_ALL_BIT);
+			return true;
+		}
+		return false;
+	}
+
+	pr_err("WARNING: ibpb_can_flush_all() is only meant for HYGON CPUs.\n");
+	return false;
+}
+
 static void __init srso_select_mitigation(void)
 {
 	bool has_microcode;
@@ -2213,6 +2314,10 @@ static void __init srso_select_mitigation(void)
 	 * for guests to verify whether IBPB is a viable mitigation.
 	 */
 	has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		has_microcode = ibpb_can_flush_all();
+
 	if (!has_microcode) {
 		pr_warn("IBPB-extending microcode not applied!\n");
 		pr_warn(SRSO_NOTICE);
@@ -2241,6 +2346,21 @@ static void __init srso_select_mitigation(void)
 		}
 	}
 
+	/* Enhanced IBRS (eIBRS) is preferred on HYGON processors. */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		switch (spectre_v2_enabled) {
+		case SPECTRE_V2_EIBRS:
+		case SPECTRE_V2_EIBRS_RETPOLINE:
+		case SPECTRE_V2_EIBRS_LFENCE:
+			srso_mitigation = SRSO_MITIGATION_EIBRS;
+			pr_info("%s%s\n", srso_strings[srso_mitigation],
+				(has_microcode ? "" : ", no microcode"));
+			goto pred_cmd;
+		default:
+			break;
+		}
+	}
+
 	switch (srso_cmd) {
 	case SRSO_CMD_OFF:
 		goto pred_cmd;
@@ -2496,11 +2616,21 @@ static ssize_t retbleed_show_state(char *buf)
 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
 
-		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+			return sysfs_emit(buf, "%s; SMT %s\n",
+					  retbleed_strings[retbleed_mitigation],
 				  !sched_smt_active() ? "disabled" :
+				  spectre_v2_in_eibrs_mode(spectre_v2_enabled) ||
 				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
 				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
 				  "enabled with STIBP protection" : "vulnerable");
+		}
+
+		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
+				  !sched_smt_active() ? "disabled" :
+				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
+				  "enabled with STIBP protection" : "vulnerable");
 	}
 
 	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8deb567c92a6d58ba337a9d2dc1419e0d6bea3d4..5934f8715e1fb50f08471437f116ca7ea0b3a9a5 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -954,7 +954,10 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
 	 * Intel CPUs, for finer-grained selection of what's available.
 	 */
 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
-		set_cpu_cap(c, X86_FEATURE_IBRS);
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+			set_cpu_cap(c, X86_FEATURE_IBRS_ENHANCED);
+		else
+			set_cpu_cap(c, X86_FEATURE_IBRS);
 		set_cpu_cap(c, X86_FEATURE_IBPB);
 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 	}
@@ -967,7 +970,10 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_SSBD);
 
 	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
-		set_cpu_cap(c, X86_FEATURE_IBRS);
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+			set_cpu_cap(c, X86_FEATURE_IBRS_ENHANCED);
+		else
+			set_cpu_cap(c, X86_FEATURE_IBRS);
 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 	}
 
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index f57257465d0c58ef45b44cca858d90bcbd0b337b..0959269727c28b53ac125e13b4b917ff74c26836 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -24,6 +24,7 @@
 
 #include "cpu.h"
 
+#define IBRS_FLUSH_RAS_BIT	56
 #define APICID_SOCKET_ID_BIT	6
 
 /*
@@ -330,12 +331,29 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 		setup_clear_cpu_cap(X86_FEATURE_CSV3);
 }
 
+/*
+ * cpu_vul_mitigation() - set the basic configuration used to mitigate CPU vulnerabilities
+ */
+static void cpu_vul_mitigation(void)
+{
+	/*
+	 * Automatically flush the RAS when the privilege level changes from low to high.
+	 * This is used as the RSB mitigation instead of RSB filling.
+	 */
+	if ((boot_cpu_data.x86 == 0x18) &&
+	    (boot_cpu_data.x86_model > 0x3)) {
+		msr_set_bit(MSR_ZEN4_BP_CFG, IBRS_FLUSH_RAS_BIT);
+	}
+}
+
 static void early_init_hygon(struct cpuinfo_x86 *c)
 {
 	u32 dummy;
 
 	early_init_hygon_mc(c);
 
+	cpu_vul_mitigation();
+
 	set_cpu_cap(c, X86_FEATURE_K8);
 
 	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
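
For reviewers who want to see what the reporting paths touched above end up emitting on a given machine (for example before and after booting with ibpb_brtype=ibpb-ind), a minimal user-space sketch such as the following can dump the relevant sysfs entries. It is not part of the patch; only the spec_rstack_overflow path comes from the srso.rst change, the other two are the standard vulnerability files assumed to be present on the test kernel.

/*
 * Sketch (not part of the patch): print the mitigation strings produced
 * by the modified retbleed_show_state() and srso_strings[] reporting.
 */
#include <stdio.h>

static void show(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(line, sizeof(line), f))
		printf("%-62s %s", path, line);
	fclose(f);
}

int main(void)
{
	/* Path documented in the srso.rst hunk above. */
	show("/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow");
	/* Related entries affected by this patch's reporting changes. */
	show("/sys/devices/system/cpu/vulnerabilities/retbleed");
	show("/sys/devices/system/cpu/vulnerabilities/spectre_v2");
	return 0;
}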