bpf: Introduce BPF nospec instruction for mitigating Spectre v4
author	Daniel Borkmann <daniel@iogearbox.net>
Tue, 13 Jul 2021 08:18:31 +0000 (08:18 +0000)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 4 Aug 2021 10:47:56 +0000 (12:47 +0200)
[ Upstream commit f5e81d1117501546b7be050c5fbafa6efd2c722c ]

In case of JITs, each of the JIT backends compiles the BPF nospec instruction
/either/ to a machine instruction which emits a speculation barrier /or/ to
/no/ machine instruction in case the underlying architecture is not affected
by Speculative Store Bypass or has different mitigations in place already.

This covers both x86 and (implicitly) arm64: In case of x86, we use the 'lfence'
instruction for mitigation. In case of arm64, we rely on the firmware mitigation
as controlled via the ssbd kernel parameter. Whenever the mitigation is enabled,
it works for all of the kernel code with no need to provide any additional
instructions here (hence only a comment in the arm64 JIT). Other archs can follow
as needed. The BPF nospec instruction specifically targets Spectre v4
since i) we don't use a serialization barrier for the Spectre v1 case, and
ii) the mitigation instructions for v1 and v4 might differ on some archs.

The BPF nospec instruction is required for a future commit, in which the BPF
verifier annotates intermediate BPF programs with speculation barriers.

Co-developed-by: Piotr Krysiuk <piotras@gmail.com>
Co-developed-by: Benedict Schlueter <benedict.schlueter@rub.de>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Piotr Krysiuk <piotras@gmail.com>
Signed-off-by: Benedict Schlueter <benedict.schlueter@rub.de>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
14 files changed:
arch/arm/net/bpf_jit_32.c
arch/arm64/net/bpf_jit_comp.c
arch/mips/net/ebpf_jit.c
arch/powerpc/net/bpf_jit_comp32.c
arch/powerpc/net/bpf_jit_comp64.c
arch/riscv/net/bpf_jit_comp32.c
arch/riscv/net/bpf_jit_comp64.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/net/bpf_jit_comp_64.c
arch/x86/net/bpf_jit_comp.c
arch/x86/net/bpf_jit_comp32.c
include/linux/filter.h
kernel/bpf/core.c
kernel/bpf/disasm.c
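
For orientation before the per-file diffs: the new pseudo-opcode reuses the
otherwise unused mode bits 0xc0 within the BPF_ST class (0x02), so the full
opcode byte comes out as 0xc2. A minimal sketch of how kernel-internal code
could test for it (the insn_is_nospec() helper below is hypothetical and not
part of this patch):

    #include <linux/filter.h>

    /* BPF_ST is class 0x02; BPF_NOSPEC claims the unused mode bits 0xc0,
     * so the complete opcode byte is 0x02 | 0xc0 == 0xc2.
     */
    static inline bool insn_is_nospec(const struct bpf_insn *insn)
    {
            return insn->code == (BPF_ST | BPF_NOSPEC);
    }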

diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 897634d0a67ca34b338399d93fb1d51f86c5e58a..a951276f05475ab8520a666257435b943b09d575 100644
@@ -1602,6 +1602,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
                emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
                break;
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index f7b194878a99a0931ef43250ad5bb1b04498517b..5a876af34230c8b4d82676473232b82cd3953c4f 100644
@@ -829,6 +829,19 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                        return ret;
                break;
 
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               /*
+                * Nothing required here.
+                *
+                * In case of arm64, we rely on the firmware mitigation of
+                * Speculative Store Bypass as controlled via the ssbd kernel
+                * parameter. Whenever the mitigation is enabled, it works
+                * for all of the kernel code with no need to provide any
+                * additional instructions.
+                */
+               break;
+
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 939dd06764bc9ff91f8540af452026f7327de8cc..3a73e937571217a6925a5c27cf96ac2f642fdb77 100644
@@ -1355,6 +1355,9 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                }
                break;
 
+       case BPF_ST | BPF_NOSPEC: /* speculation barrier */
+               break;
+
        case BPF_ST | BPF_B | BPF_MEM:
        case BPF_ST | BPF_H | BPF_MEM:
        case BPF_ST | BPF_W | BPF_MEM:
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 68476780047acc6315109e4b571f637035ba4fd5..6c0915285b1715e9c3babebba8fc9be4a4a62c9f 100644
@@ -737,6 +737,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                        }
                        break;
 
+               /*
+                * BPF_ST NOSPEC (speculation barrier)
+                */
+               case BPF_ST | BPF_NOSPEC:
+                       break;
+
                /*
                 * BPF_ST(X)
                 */
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 94411af24013f4a498f1cabd76c164644253b50d..d3ad8dfba1f699671764dfddc5ee5557bec336fc 100644
@@ -627,6 +627,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                        }
                        break;
 
+               /*
+                * BPF_ST NOSPEC (speculation barrier)
+                */
+               case BPF_ST | BPF_NOSPEC:
+                       break;
+
                /*
                 * BPF_ST(X)
                 */
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index 81de865f4c7c3592e6d730b2d3f723e9cacf09e7..e6497424cbf60b07e9d844d66bc2ac0e46453eb7 100644
@@ -1251,6 +1251,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
                        return -1;
                break;
 
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
+
        case BPF_ST | BPF_MEM | BPF_B:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_W:
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 87e3bf5b9086dde64ef2460694a1cc7b4f6360d1..3af4131c22c7a9ecd0676964fef3eaf172c511c3 100644
@@ -939,6 +939,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
                emit_ld(rd, 0, RV_REG_T1, ctx);
                break;
 
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
+
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_B:
                emit_imm(RV_REG_T1, imm, ctx);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 2ae419f5115a5af1cd6f2af488547191dd9f66f9..88419263a89a967bb6b40e7af668b5be2159dae1 100644
@@ -1153,6 +1153,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                        break;
                }
                break;
+       /*
+        * BPF_NOSPEC (speculation barrier)
+        */
+       case BPF_ST | BPF_NOSPEC:
+               break;
        /*
         * BPF_ST(X)
         */
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 4b8d3c65d2666e3858432ced5d4ce8b72f78c771..9a2f20cbd48b7c1c9ecf6c0e054c3e16bd1a7381 100644
@@ -1287,6 +1287,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                        return 1;
                break;
        }
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 66e304a84deb00f17179a839707f7f17cd5c1ab6..ee9971bbe034ac968cf8f3c2da2e5cf56ff50e53 100644
@@ -1235,6 +1235,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                        }
                        break;
 
+                       /* speculation barrier */
+               case BPF_ST | BPF_NOSPEC:
+                       if (boot_cpu_has(X86_FEATURE_XMM2))
+                               /* Emit 'lfence' */
+                               EMIT3(0x0F, 0xAE, 0xE8);
+                       break;
+
                        /* ST: *(u8*)(dst_reg + off) = imm */
                case BPF_ST | BPF_MEM | BPF_B:
                        if (is_ereg(dst_reg))
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 3da88ded6ee39fc00d859f8cb594c343f7985bd0..3bfda5f502cb85c1f458c2cc3f49b1aa5b51e259 100644
@@ -1886,6 +1886,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                        i++;
                        break;
                }
+               /* speculation barrier */
+               case BPF_ST | BPF_NOSPEC:
+                       if (boot_cpu_has(X86_FEATURE_XMM2))
+                               /* Emit 'lfence' */
+                               EMIT3(0x0F, 0xAE, 0xE8);
+                       break;
                /* ST: *(u8*)(dst_reg + off) = imm */
                case BPF_ST | BPF_MEM | BPF_H:
                case BPF_ST | BPF_MEM | BPF_B:
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 9a09547bc7bae49f9f004df37cbe90ac68a8d48c..16e5cebea82ca885e1f0b6b842eecbc7ab0b7636 100644
@@ -73,6 +73,11 @@ struct ctl_table_header;
 /* unused opcode to mark call to interpreter with arguments */
 #define BPF_CALL_ARGS  0xe0
 
+/* unused opcode to mark speculation barrier for mitigating
+ * Speculative Store Bypass
+ */
+#define BPF_NOSPEC     0xc0
+
 /* As per nm, we expose JITed images as text (code) section for
  * kallsyms. That way, tools like perf can find it to match
  * addresses.
@@ -390,6 +395,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
                .off   = 0,                                     \
                .imm   = 0 })
 
+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC()                                                \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ST | BPF_NOSPEC,                   \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
 /* Internal classic blocks for direct assignment */
 
 #define __BPF_STMT(CODE, K)                                    \
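
Since BPF_NOSPEC deliberately uses a non-UAPI opcode, only kernel-internal code
(such as the verifier rewrite referred to in the commit message) can emit it. A
minimal, hypothetical sketch of how the new BPF_ST_NOSPEC() macro slots into an
internal instruction sequence, placing the barrier between a stack store and a
dependent load:

    struct bpf_insn insns[] = {
            BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),          /* *(u32 *)(r10 - 8) = 0  */
            BPF_ST_NOSPEC(),                               /* speculation barrier    */
            BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8), /* r0 = *(u32 *)(r10 - 8) */
            BPF_EXIT_INSN(),
    };
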
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 9b15774983738db74e13786fcb66adf450140378..b1a5fc04492bd0febe0b57016a69d8875941bddc 100644
@@ -32,6 +32,8 @@
 #include <linux/perf_event.h>
 #include <linux/extable.h>
 #include <linux/log2.h>
+
+#include <asm/barrier.h>
 #include <asm/unaligned.h>
 
 /* Registers */
@@ -1377,6 +1379,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
                /* Non-UAPI available opcodes. */
                [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
                [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
+               [BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
                [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
                [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
                [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
@@ -1621,7 +1624,21 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
        COND_JMP(s, JSGE, >=)
        COND_JMP(s, JSLE, <=)
 #undef COND_JMP
-       /* STX and ST and LDX*/
+       /* ST, STX and LDX*/
+       ST_NOSPEC:
+               /* Speculation barrier for mitigating Speculative Store Bypass.
+                * In case of arm64, we rely on the firmware mitigation as
+                * controlled via the ssbd kernel parameter. Whenever the
+                * mitigation is enabled, it works for all of the kernel code
+                * with no need to provide any additional instructions here.
+                * In case of x86, we use 'lfence' insn for mitigation. We
+                * reuse preexisting logic from Spectre v1 mitigation that
+                * happens to produce the required code on x86 for v4 as well.
+                */
+#ifdef CONFIG_X86
+               barrier_nospec();
+#endif
+               CONT;
 #define LDST(SIZEOP, SIZE)                                             \
        STX_MEM_##SIZEOP:                                               \
                *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
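
For reference, a rough sketch of what the CONFIG_X86 branch in the interpreter
boils down to at runtime; the real barrier_nospec() lives in
arch/x86/include/asm/barrier.h (pulled in via the new asm/barrier.h include) and
is patched in via the alternatives mechanism, so this is a simplification, not
the kernel's definition:

    /* Simplified sketch; assumes the CPU supports 'lfence' (SSE2). */
    static inline void nospec_barrier_sketch(void)
    {
            asm volatile("lfence" ::: "memory");
    }
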
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index bbfc6bb7924007ef0d904b54ecc2dee217064d40..ca3cd9aaa6ced0e65bad5617a805f9ca4ad9a715 100644
@@ -206,15 +206,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
                        verbose(cbs->private_data, "BUG_%02x\n", insn->code);
                }
        } else if (class == BPF_ST) {
-               if (BPF_MODE(insn->code) != BPF_MEM) {
+               if (BPF_MODE(insn->code) == BPF_MEM) {
+                       verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
+                               insn->code,
+                               bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+                               insn->dst_reg,
+                               insn->off, insn->imm);
+               } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
+                       verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
+               } else {
                        verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
-                       return;
                }
-               verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
-                       insn->code,
-                       bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
-                       insn->dst_reg,
-                       insn->off, insn->imm);
        } else if (class == BPF_LDX) {
                if (BPF_MODE(insn->code) != BPF_MEM) {
                        verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
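
With the disasm change above, a verifier program dump would render the new
instruction roughly as follows (a sketch of the expected output format, not
taken from an actual log; instruction indices are made up):

    6: (62) *(u32 *)(r10 -8) = 0
    7: (c2) nospec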