x86/bugs/AMD: Add support to disable RDS on Fam[15,16,17]h if requested
author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
          Thu, 26 Apr 2018 02:04:24 +0000 (22:04 -0400)
committer Ben Hutchings <ben@decadent.org.uk>
          Wed, 3 Oct 2018 03:09:40 +0000 (04:09 +0100)
commit 764f3c21588a059cd783c6ba0734d4db2d72822d upstream.

AMD does not need the Speculative Store Bypass mitigation to be enabled.

The controls for this are already available: the mitigation is toggled
via MSR C001_1020 (LS_CFG), and each family uses a different bit in
that MSR for it.

[ tglx: Expose the bit mask via a variable and move the actual MSR fiddling
        into the bugs code as that's the right thing to do and also required
        to prepare for dynamic enable/disable ]

Suggested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
[bwh: Backported to 3.16:
 - Renumber the feature bit
 - We don't have __ro_after_init
 - Adjust filename, context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
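
For reference, the family-to-bit mapping that the patch hard-codes in
bsp_init_amd() can be written as a stand-alone C sketch. This is
illustrative only: ssb_bit_for_family() and ssb_mask_for_family() are
hypothetical helper names, but the family/bit pairs are exactly the
ones in the switch statement in the diff below.

/*
 * Stand-alone sketch of the per-family SSB-disable bit in LS_CFG
 * (MSR C001_1020). The helper names are hypothetical; the kernel
 * open-codes the same switch in bsp_init_amd().
 */
#include <stdint.h>
#include <stdio.h>

static int ssb_bit_for_family(unsigned int family)
{
	switch (family) {
	case 0x15: return 54;
	case 0x16: return 33;
	case 0x17: return 10;
	default:   return -1;	/* no known LS_CFG SSB-disable bit */
	}
}

static uint64_t ssb_mask_for_family(unsigned int family)
{
	int bit = ssb_bit_for_family(family);

	return bit < 0 ? 0 : 1ULL << bit;
}

int main(void)
{
	static const unsigned int families[] = { 0x15, 0x16, 0x17 };

	for (size_t i = 0; i < sizeof(families) / sizeof(families[0]); i++)
		printf("Fam%xh: LS_CFG SSB-disable mask 0x%016llx\n",
		       families[i],
		       (unsigned long long)ssb_mask_for_family(families[i]));
	return 0;
}

Note the design choice visible in the patch: the unmodified LS_CFG
value is cached in x86_amd_ls_cfg_base at boot, so enabling the
mitigation later is a single wrmsrl() of base | mask rather than a
read-modify-write of the MSR.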
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c

diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 100c6b1737d2cff9a2b847cdb50dca89fb78c6d8..e2924bbeb8c734b69a03946e29b9100a09031c79 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
 #define X86_FEATURE_USE_IBPB   (7*32+12) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW (7*32+13) /* "" Use IBRS during runtime firmware calls */
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE (7*32+14) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_AMD_RDS    (7*32+15)  /* "" AMD RDS implementation */
 
 #define X86_FEATURE_RETPOLINE  (7*32+29) /* "" Generic Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_RETPOLINE_AMD (7*32+30) /* "" AMD Retpoline mitigation for Spectre variant 2 */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f3aef8eaf8b873e5b86916517d6cdb2dec4c61be..b00b7c5b10a55614b45488d6954a0e0f8a2b4b72 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -199,6 +199,10 @@ enum ssb_mitigation {
        SPEC_STORE_BYPASS_DISABLE,
 };
 
+/* AMD specific Speculative Store Bypass MSR data */
+extern u64 x86_amd_ls_cfg_base;
+extern u64 x86_amd_ls_cfg_rds_mask;
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ae2832700bb58c20f4d2fc41256445ca925b1ab3..913a74b67d1bbea7f08ca06f69b75e797d0ab9ee 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -8,6 +8,7 @@
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
+#include <asm/nospec-branch.h>
 #include <asm/pci-direct.h>
 
 #ifdef CONFIG_X86_64
@@ -470,6 +471,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                va_align.mask     = (upperbit - 1) & PAGE_MASK;
                va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
        }
+
+       if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+               unsigned int bit;
+
+               switch (c->x86) {
+               case 0x15: bit = 54; break;
+               case 0x16: bit = 33; break;
+               case 0x17: bit = 10; break;
+               default: return;
+               }
+               /*
+                * Try to cache the base value so further operations can
+                * avoid RMW. If that faults, do not enable RDS.
+                */
+               if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+                       setup_force_cpu_cap(X86_FEATURE_RDS);
+                       setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
+                       x86_amd_ls_cfg_rds_mask = 1ULL << bit;
+               }
+       }
 }
 
 static void early_init_amd(struct cpuinfo_x86 *c)
@@ -780,6 +801,11 @@ static void init_amd(struct cpuinfo_x86 *c)
                set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
 
        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
+
+       if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
+               set_cpu_cap(c, X86_FEATURE_RDS);
+               set_cpu_cap(c, X86_FEATURE_AMD_RDS);
+       }
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 419a7c57839b0cb3f70900b6f748c96c6025dabf..692e38f6203d6ca17197902d76651ade12314d1e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -40,6 +40,13 @@ static u64 x86_spec_ctrl_base;
  */
 static u64 x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
 
+/*
+ * AMD specific MSR info for Speculative Store Bypass control.
+ * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
+ */
+u64 x86_amd_ls_cfg_base;
+u64 x86_amd_ls_cfg_rds_mask;
+
 #ifdef CONFIG_X86_32
 
 static double __initdata x = 4195835.0;
@@ -109,7 +116,8 @@ void __init check_bugs(void)
 
        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
-        * have unknown values.
+        * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+        * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
@@ -216,6 +224,14 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
 
+static void x86_amd_rds_enable(void)
+{
+       u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
+
+       if (boot_cpu_has(X86_FEATURE_AMD_RDS))
+               wrmsrl(MSR_AMD64_LS_CFG, msrval);
+}
+
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
 
@@ -481,6 +497,11 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 
        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
+               /*
+                * AMD platforms by default don't need SSB mitigation.
+                */
+               if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+                       break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
@@ -507,6 +528,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
                        x86_spec_ctrl_set(SPEC_CTRL_RDS);
                        break;
                case X86_VENDOR_AMD:
+                       x86_amd_rds_enable();
                        break;
                }
        }
@@ -528,6 +550,9 @@ void x86_spec_ctrl_setup_ap(void)
 {
        if (boot_cpu_has(X86_FEATURE_IBRS))
                x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+
+       if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+               x86_amd_rds_enable();
 }
 
 #ifdef CONFIG_SYSFS
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c50e2133ad11021d0efe26789e3c623c9ce4c18a..0d322a3d610a4f67cf1372f1aff9754354a61993 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -819,6 +819,10 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
        { X86_VENDOR_CENTAUR,   5,                                      },
        { X86_VENDOR_INTEL,     5,                                      },
        { X86_VENDOR_NSC,       5,                                      },
+       { X86_VENDOR_AMD,       0x12,                                   },
+       { X86_VENDOR_AMD,       0x11,                                   },
+       { X86_VENDOR_AMD,       0x10,                                   },
+       { X86_VENDOR_AMD,       0xf,                                    },
        { X86_VENDOR_ANY,       4,                                      },
        {}
 };
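
With the hunks above applied, an AMD Fam[15,16,17]h system stays
unmitigated under spec_store_bypass_disable=auto and only sets the
LS_CFG bit when the mitigation is requested, e.g. with
spec_store_bypass_disable=on on the kernel command line. A minimal
user-space check of the result, assuming the sysfs vulnerabilities
reporting introduced earlier in this series is present:

/*
 * Print the kernel's reported Speculative Store Bypass state, e.g.
 * "Vulnerable" or "Mitigation: Speculative Store Bypass disabled".
 * Assumes the sysfs vulnerabilities interface from this series.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/vulnerabilities/spec_store_bypass";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}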