5 changes: 5 additions & 0 deletions arch/arm64/net/bpf_jit.h
@@ -325,4 +325,9 @@
#define A64_MRS_SP_EL0(Rt) \
aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_SP_EL0)

/* Barriers */
#define A64_SB aarch64_insn_get_sb_value()
#define A64_DSB_NSH (aarch64_insn_get_dsb_base_value() | 0x7 << 8)
#define A64_ISB aarch64_insn_get_isb_value()

#endif /* _BPF_JIT_H */
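A quick note on the A64_DSB_NSH definition added above: the DSB opcode keeps its barrier domain/type in the CRm field at bits [11:8], and the value 0x7 selects the non-shareable ("nsh") full barrier, so OR-ing 0x7 << 8 onto the base encoding yields "dsb nsh". Below is a minimal stand-alone sketch of the same arithmetic; the 0xd503309f base value is an assumption mirroring the kernel's dsb_base encoding and is not taken from this diff.

/* Sketch only: reproduce the A64_DSB_NSH construction in user space.
 * Assumes the DSB base opcode 0xd503309f (CRm cleared), which is what
 * aarch64_insn_get_dsb_base_value() is expected to return.
 */
#include <stdint.h>
#include <stdio.h>

#define DSB_BASE    0xd503309fu  /* dsb, CRm field zeroed */
#define DSB_NSH_CRM 0x7u         /* CRm = 0b0111 selects "nsh" */

int main(void)
{
	uint32_t insn = DSB_BASE | DSB_NSH_CRM << 8;  /* CRm lives in bits [11:8] */

	printf("dsb nsh encodes as 0x%08x\n", insn);  /* expect 0xd503379f */
	return 0;
}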
28 changes: 18 additions & 10 deletions arch/arm64/net/bpf_jit_comp.c
@@ -1581,17 +1581,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
return ret;
break;

/* speculation barrier */
/* speculation barrier against v1 and v4 */
case BPF_ST | BPF_NOSPEC:
/*
* Nothing required here.
*
* In case of arm64, we rely on the firmware mitigation of
* Speculative Store Bypass as controlled via the ssbd kernel
* parameter. Whenever the mitigation is enabled, it works
* for all of the kernel code with no need to provide any
* additional instructions.
*/
if (alternative_has_cap_likely(ARM64_HAS_SB)) {
emit(A64_SB, ctx);
} else {
emit(A64_DSB_NSH, ctx);
emit(A64_ISB, ctx);
}
break;

/* ST: *(size *)(dst + off) = imm */
@@ -2762,6 +2759,17 @@ bool bpf_jit_supports_percpu_insn(void)
return true;
}

bool bpf_jit_bypass_spec_v4(void)
{
/* In case of arm64, we rely on the firmware mitigation of Speculative
* Store Bypass as controlled via the ssbd kernel parameter. Whenever
* the mitigation is enabled, it works for all of the kernel code with
* no need to provide any additional instructions. Therefore, skip
* inserting nospec insns against Spectre v4.
*/
return true;
}

bool bpf_jit_inlines_helper_call(s32 imm)
{
switch (imm) {
80 changes: 60 additions & 20 deletions arch/powerpc/net/bpf_jit_comp64.c
@@ -363,6 +363,23 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
return 0;
}

bool bpf_jit_bypass_spec_v1(void)
{
#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR));
#else
return true;
#endif
}

bool bpf_jit_bypass_spec_v4(void)
{
return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
security_ftr_enabled(SEC_FTR_STF_BARRIER) &&
stf_barrier_type_get() != STF_BARRIER_NONE);
}

/*
* We spill into the redzone always, even if the bpf program has its own stackframe.
* Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
@@ -390,6 +407,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
u32 *addrs, int pass, bool extra_pass)
{
enum stf_barrier_type stf_barrier = stf_barrier_type_get();
bool sync_emitted, ori31_emitted;
const struct bpf_insn *insn = fp->insnsi;
int flen = fp->len;
int i, ret;
@@ -783,30 +801,52 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code

/*
* BPF_ST NOSPEC (speculation barrier)
*
* The following must act as a barrier against both Spectre v1
* and v4 if we requested both mitigations. Therefore, also emit
* 'isync; sync' on E500 or 'ori31' on BOOK3S_64 in addition to
* the insns needed for a Spectre v4 barrier.
*
* If we requested only !bypass_spec_v1 OR only !bypass_spec_v4,
* we can skip the respective other barrier type as an
* optimization.
*/
case BPF_ST | BPF_NOSPEC:
if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
!security_ftr_enabled(SEC_FTR_STF_BARRIER))
break;

switch (stf_barrier) {
case STF_BARRIER_EIEIO:
EMIT(PPC_RAW_EIEIO() | 0x02000000);
break;
case STF_BARRIER_SYNC_ORI:
sync_emitted = false;
ori31_emitted = false;
#ifdef CONFIG_PPC_E500
if (!bpf_jit_bypass_spec_v1()) {
EMIT(PPC_RAW_ISYNC());
EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
EMIT(PPC_RAW_ORI(_R31, _R31, 0));
break;
case STF_BARRIER_FALLBACK:
ctx->seen |= SEEN_FUNC;
PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
EMIT(PPC_RAW_MTCTR(_R12));
EMIT(PPC_RAW_BCTRL());
break;
case STF_BARRIER_NONE:
break;
sync_emitted = true;
}
#endif
if (!bpf_jit_bypass_spec_v4()) {
switch (stf_barrier) {
case STF_BARRIER_EIEIO:
EMIT(PPC_RAW_EIEIO() | 0x02000000);
break;
case STF_BARRIER_SYNC_ORI:
if (!sync_emitted)
EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
EMIT(PPC_RAW_ORI(_R31, _R31, 0));
ori31_emitted = true;
break;
case STF_BARRIER_FALLBACK:
ctx->seen |= SEEN_FUNC;
PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
EMIT(PPC_RAW_MTCTR(_R12));
EMIT(PPC_RAW_BCTRL());
break;
case STF_BARRIER_NONE:
break;
}
}
#ifdef CONFIG_PPC_BOOK3S_64
if (!bpf_jit_bypass_spec_v1() && !ori31_emitted)
EMIT(PPC_RAW_ORI(_R31, _R31, 0));
#endif
break;

/*
11 changes: 9 additions & 2 deletions include/linux/bpf.h
@@ -2268,6 +2268,9 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
return ret;
}

bool bpf_jit_bypass_spec_v1(void);
bool bpf_jit_bypass_spec_v4(void);

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;
@@ -2455,12 +2458,16 @@ static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)

static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
{
return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
return bpf_jit_bypass_spec_v1() ||
cpu_mitigations_off() ||
bpf_token_capable(token, CAP_PERFMON);
}

static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
{
return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
return bpf_jit_bypass_spec_v4() ||
cpu_mitigations_off() ||
bpf_token_capable(token, CAP_PERFMON);
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
3 changes: 2 additions & 1 deletion include/linux/bpf_verifier.h
@@ -576,7 +576,8 @@ struct bpf_insn_aux_data {
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
bool nospec; /* do not execute this instruction speculatively */
bool nospec_result; /* result is unsafe under speculation, nospec must follow */
bool zext_dst; /* this insn zero extends dst reg */
bool needs_zext; /* alu op needs to clear upper bits */
bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
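The two new bpf_insn_aux_data flags replace the single sanitize_stack_spill bit: per the comments above, nospec roughly asks for a speculation barrier before the instruction, while nospec_result asks for one after it (the Spectre v4 case the old flag covered). A purely hypothetical sketch of how such per-instruction flags might drive barrier placement follows; struct insn_flags and emit_prog() are invented for illustration and are not the verifier's actual code.

/* Hypothetical illustration of consuming per-insn nospec flags.
 * None of these names exist in the kernel; the real logic lives in the
 * verifier's instruction-patching passes.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct insn_flags {
	bool nospec;         /* emit a barrier before this insn */
	bool nospec_result;  /* emit a barrier after this insn */
};

static void emit_prog(const struct insn_flags *aux, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		if (aux[i].nospec)
			printf("  <nospec barrier>\n");
		printf("  insn %zu\n", i);
		if (aux[i].nospec_result)
			printf("  <nospec barrier>\n");
	}
}

int main(void)
{
	/* Example: only the second insn's result is unsafe under speculation. */
	struct insn_flags aux[3] = { { 0 }, { .nospec_result = true }, { 0 } };

	emit_prog(aux, 3);
	return 0;
}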
2 changes: 1 addition & 1 deletion include/linux/filter.h
@@ -82,7 +82,7 @@ struct ctl_table_header;
#define BPF_CALL_ARGS 0xe0

/* unused opcode to mark speculation barrier for mitigating
* Speculative Store Bypass
* Spectre v1 and v4
*/
#define BPF_NOSPEC 0xc0

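For context on the repurposed 0xc0 opcode: a BPF_NOSPEC barrier is, to my understanding, encoded as a store-class instruction with every operand field zeroed, so only the opcode byte (BPF_ST | BPF_NOSPEC = 0xc2) distinguishes it. A small sketch follows; the struct below merely imitates the UAPI struct bpf_insn layout and the surrounding code is illustrative, not kernel code.

/* Sketch: the shape of a BPF_ST | BPF_NOSPEC barrier instruction.
 * The struct imitates the UAPI struct bpf_insn for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define BPF_ST     0x02  /* instruction class */
#define BPF_NOSPEC 0xc0  /* marks the speculation barrier (see above) */

struct insn {
	uint8_t code;        /* opcode */
	uint8_t dst_reg:4;   /* dest register */
	uint8_t src_reg:4;   /* source register */
	int16_t off;         /* signed offset */
	int32_t imm;         /* signed immediate */
};

int main(void)
{
	/* All operand fields stay zero; only the opcode carries meaning. */
	struct insn nospec = { .code = BPF_ST | BPF_NOSPEC };

	printf("nospec opcode: 0x%02x\n", nospec.code);  /* expect 0xc2 */
	return 0;
}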
32 changes: 24 additions & 8 deletions kernel/bpf/core.c
@@ -2102,14 +2102,15 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
#undef COND_JMP
/* ST, STX and LDX*/
ST_NOSPEC:
/* Speculation barrier for mitigating Speculative Store Bypass.
* In case of arm64, we rely on the firmware mitigation as
* controlled via the ssbd kernel parameter. Whenever the
* mitigation is enabled, it works for all of the kernel code
* with no need to provide any additional instructions here.
* In case of x86, we use 'lfence' insn for mitigation. We
* reuse preexisting logic from Spectre v1 mitigation that
* happens to produce the required code on x86 for v4 as well.
/* Speculation barrier for mitigating Speculative Store Bypass,
* Bounds-Check Bypass and Type Confusion. In case of arm64, we
* rely on the firmware mitigation as controlled via the ssbd
* kernel parameter. Whenever the mitigation is enabled, it
* works for all of the kernel code with no need to provide any
* additional instructions here. In case of x86, we use 'lfence'
* insn for mitigation. We reuse preexisting logic from Spectre
* v1 mitigation that happens to produce the required code on
* x86 for v4 as well.
*/
barrier_nospec();
CONT;
@@ -3034,6 +3035,21 @@ bool __weak bpf_jit_needs_zext(void)
return false;
}

/* By default, enable the verifier's mitigations against Spectre v1 and v4 for
* all archs. The value returned must not change at runtime as there is
* currently no support for reloading programs that were loaded without
* mitigations.
*/
bool __weak bpf_jit_bypass_spec_v1(void)
{
return false;
}

bool __weak bpf_jit_bypass_spec_v4(void)
{
return false;
}

/* Return true if the JIT inlines the call to the helper corresponding to
* the imm.
*