bpf-fixes

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE+soXsSLHKoYyzcli6rmadz2vbToFAmiNNksACgkQ6rmadz2v
 bTrKRhAAnju4bbFRHU88Y68p6Meq/jxgjxHZAkTqZA0Nvbu2cItPRL7XHAAhTWE7
 OBEIm3UKCH4gs4fY8rDHiIgnnaQavXUmvXZblOIOjxnqRKJpU3px+wwJvGFq5Enq
 WP6UZV8tj+O2tNfNNYS+mgQvvIpUISHGpKimvx7ede3e1U3cJBkppbT3gooMHYuc
 5s1QtYHWaPY/1DpkHgqJ2UPGcbT9/HSPGMHRNaHKjQTcNcLcrj7RRjchgXqcc7Vs
 hVijvVrLiuK0MyU42ritmaqvjjgD6hKPZguRQe2/hAtrOo0Alf+4mXkMgam7simN
 iHfGc7nhw1xAFTPj4WXahja89G00FdDN5NR37Rgurm/i2fY7BuXAkMjiMiwGB3C3
 jk2wG3RSifYeC2rxhkYJdqcx8Cz6m+pjgyJ2o9Jy5dn426VXg/kzkUXpl6u5jaPZ
 SmKoo9Xu1r7xqTaUc9kk8pJI5Xt9vD5oQjF2KQuPZXxNidiwW6k2OGbW+wF26nEi
 Q6pfDu3pvHAd/UE6cD5yFe97o3Cc2XfGwI/Sv2k99UVPvNcvfAvVo9fsItHBhCPn
 zHkihW2S0zmbBlhcrB+PrLclNgLleP9JukFN+5scc0a9lbQxIm6v2TNKGlBfDQtO
 I+Kn266oqT4BEgnQGlCQquINnQAdmS8VMnnunGOu6+rwPUtkI7E=
 =XLHS
 -----END PGP SIGNATURE-----

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

 - Fix kCFI failures in JITed BPF code on arm64 (Sami Tolvanen, Puranjay
   Mohan, Mark Rutland, Maxwell Bland)

 - Disallow tail calls between BPF programs that use different cgroup
   local storage maps to prevent out-of-bounds access (Daniel Borkmann;
   a repro sketch follows the diffstat below)

 - Fix unaligned access in flow_dissector and netfilter BPF programs
   (Paul Chaignon)

 - Avoid possible use of uninitialized mod_len in libbpf (Achill
   Gilgenast)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: Test for unaligned flow_dissector ctx access
  bpf: Improve ctx access verifier error message
  bpf: Check netfilter ctx accesses are aligned
  bpf: Check flow_dissector ctx accesses are aligned
  arm64/cfi,bpf: Support kCFI + BPF on arm64
  cfi: Move BPF CFI types and helpers to generic code
  cfi: add C CFI type macro
  libbpf: Avoid possible use of uninitialized mod_len
  bpf: Fix oob access in cgroup local storage
  bpf: Move cgroup iterator helpers to bpf.h
  bpf: Move bpf map owner out of common struct
  bpf: Add cookie object to bpf maps
commit a6923c06a3 (Linus Torvalds, 2025-08-01 17:13:26 -07:00)
18 changed files with 229 additions and 176 deletions
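
For context on the cgroup local storage fix: bpf_get_local_storage() returns a pointer to a buffer sized for the storage map a program was verified against, so a tail call into a program verified against a different storage map can read or write out of bounds. A minimal repro sketch, with hypothetical map and program names in libbpf style (none of this code is from the commit):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* A storage map with a large value. A second program verified against
 * a map with a smaller value could previously share a prog_array with
 * this one and be tail-called into, despite the size mismatch.
 */
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64[16]);
} storage_big SEC(".maps");

SEC("cgroup_skb/egress")
int prog_big(struct __sk_buff *skb)
{
	__u64 *st = bpf_get_local_storage(&storage_big, 0);

	st[15] = 1;	/* in bounds only for storage_big's value size */
	return 1;
}

char _license[] SEC("license") = "GPL";

The fix below records a per-map cookie for each cgroup storage type in the map owner and refuses to mix programs whose storage maps differ.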

arch/arm64/include/asm/cfi.h (new file)

@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_CFI_H
+#define _ASM_ARM64_CFI_H
+
+#define __bpfcall
+
+#endif /* _ASM_ARM64_CFI_H */

arch/arm64/net/bpf_jit_comp.c

@@ -10,6 +10,7 @@
 #include <linux/arm-smccc.h>
 #include <linux/bitfield.h>
 #include <linux/bpf.h>
+#include <linux/cfi.h>
 #include <linux/filter.h>
 #include <linux/memory.h>
 #include <linux/printk.h>
@@ -114,6 +115,14 @@ static inline void emit(const u32 insn, struct jit_ctx *ctx)
 	ctx->idx++;
 }
 
+static inline void emit_u32_data(const u32 data, struct jit_ctx *ctx)
+{
+	if (ctx->image != NULL && ctx->write)
+		ctx->image[ctx->idx] = data;
+
+	ctx->idx++;
+}
+
 static inline void emit_a64_mov_i(const int is64, const int reg,
 				  const s32 val, struct jit_ctx *ctx)
 {
@@ -174,6 +183,12 @@ static inline void emit_bti(u32 insn, struct jit_ctx *ctx)
 		emit(insn, ctx);
 }
 
+static inline void emit_kcfi(u32 hash, struct jit_ctx *ctx)
+{
+	if (IS_ENABLED(CONFIG_CFI_CLANG))
+		emit_u32_data(hash, ctx);
+}
+
 /*
  * Kernel addresses in the vmalloc space use at most 48 bits, and the
  * remaining bits are guaranteed to be 0x1. So we can compose the address
@@ -503,7 +518,6 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
 	const u8 priv_sp = bpf2a64[PRIVATE_SP];
 	void __percpu *priv_stack_ptr;
-	const int idx0 = ctx->idx;
 	int cur_offset;
 
 	/*
@@ -529,6 +543,9 @@
 	 *
 	 */
 
+	emit_kcfi(is_main_prog ? cfi_bpf_hash : cfi_bpf_subprog_hash, ctx);
+	const int idx0 = ctx->idx;
+
 	/* bpf function may be invoked by 3 instruction types:
 	 * 1. bl, attached via freplace to bpf prog via short jump
 	 * 2. br, attached via freplace to bpf prog via long jump
@@ -2146,9 +2163,9 @@ skip_init_ctx:
 		jit_data->ro_header = ro_header;
 	}
 
-	prog->bpf_func = (void *)ctx.ro_image;
+	prog->bpf_func = (void *)ctx.ro_image + cfi_get_offset();
 	prog->jited = 1;
-	prog->jited_len = prog_size;
+	prog->jited_len = prog_size - cfi_get_offset();
 
 	if (!prog->is_func || extra_pass) {
 		int i;
@@ -2527,6 +2544,12 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 	/* return address locates above FP */
 	retaddr_off = stack_size + 8;
 
+	if (flags & BPF_TRAMP_F_INDIRECT) {
+		/*
+		 * Indirect call for bpf_struct_ops
+		 */
+		emit_kcfi(cfi_get_func_hash(func_addr), ctx);
+	}
 	/* bpf trampoline may be invoked by 3 instruction types:
 	 * 1. bl, attached to bpf prog or kernel function via short jump
 	 * 2. br, attached to bpf prog or kernel function via long jump
@@ -3045,6 +3068,7 @@ void bpf_jit_free(struct bpf_prog *prog)
 			  sizeof(jit_data->header->size));
 		kfree(jit_data);
 	}
+	prog->bpf_func -= cfi_get_offset();
 	hdr = bpf_jit_binary_pack_hdr(prog);
 	bpf_jit_binary_pack_free(hdr, NULL);
 	priv_stack_ptr = prog->aux->priv_stack_ptr;
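
Taken together, the arm64 JIT changes give the program image the following shape; a simplified sketch (not literal emitted code):

/*
 * With CONFIG_CFI_CLANG:
 *
 *   ro_image + 0:  cfi_bpf_hash or cfi_bpf_subprog_hash   <- emit_kcfi()
 *   ro_image + 4:  first prologue instruction             <- idx0, bpf_func
 *
 * prog->bpf_func points cfi_get_offset() bytes past ro_image, so a
 * kCFI-instrumented indirect call site loads the u32 at [target - 4]
 * and traps if it does not match the type hash it expects (bpf_func_t
 * for main programs, bpf_callback_t for subprogs). bpf_jit_free()
 * subtracts the offset again before looking up the pack header.
 */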

arch/riscv/include/asm/cfi.h

@@ -14,27 +14,11 @@ struct pt_regs;
 #ifdef CONFIG_CFI_CLANG
 enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
 #define __bpfcall
-static inline int cfi_get_offset(void)
-{
-	return 4;
-}
-
-#define cfi_get_offset cfi_get_offset
-
-extern u32 cfi_bpf_hash;
-extern u32 cfi_bpf_subprog_hash;
-extern u32 cfi_get_func_hash(void *func);
 #else
 static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
 {
 	return BUG_TRAP_TYPE_NONE;
 }
-
-#define cfi_bpf_hash 0U
-#define cfi_bpf_subprog_hash 0U
-static inline u32 cfi_get_func_hash(void *func)
-{
-	return 0;
-}
 #endif /* CONFIG_CFI_CLANG */
 
 #endif /* _ASM_RISCV_CFI_H */

arch/riscv/kernel/cfi.c

@@ -75,56 +75,3 @@ enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
 
 	return report_cfi_failure(regs, regs->epc, &target, type);
 }
-
-#ifdef CONFIG_CFI_CLANG
-struct bpf_insn;
-
-/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
-extern unsigned int __bpf_prog_runX(const void *ctx,
-				    const struct bpf_insn *insn);
-
-/*
- * Force a reference to the external symbol so the compiler generates
- * __kcfi_typid.
- */
-__ADDRESSABLE(__bpf_prog_runX);
-
-/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
-asm (
-"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
-"	.type	cfi_bpf_hash,@object				\n"
-"	.globl	cfi_bpf_hash					\n"
-"	.p2align	2, 0x0					\n"
-"cfi_bpf_hash:							\n"
-"	.word	__kcfi_typeid___bpf_prog_runX			\n"
-"	.size	cfi_bpf_hash, 4					\n"
-"	.popsection						\n"
-);
-
-/* Must match bpf_callback_t */
-extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
-__ADDRESSABLE(__bpf_callback_fn);
-
-/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
-asm (
-"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
-"	.type	cfi_bpf_subprog_hash,@object			\n"
-"	.globl	cfi_bpf_subprog_hash				\n"
-"	.p2align	2, 0x0					\n"
-"cfi_bpf_subprog_hash:						\n"
-"	.word	__kcfi_typeid___bpf_callback_fn			\n"
-"	.size	cfi_bpf_subprog_hash, 4				\n"
-"	.popsection						\n"
-);
-
-u32 cfi_get_func_hash(void *func)
-{
-	u32 hash;
-
-	if (get_kernel_nofault(hash, func - cfi_get_offset()))
-		return 0;
-
-	return hash;
-}
-#endif

arch/x86/include/asm/cfi.h

@@ -116,8 +116,6 @@ struct pt_regs;
 #ifdef CONFIG_CFI_CLANG
 enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
 #define __bpfcall
-extern u32 cfi_bpf_hash;
-extern u32 cfi_bpf_subprog_hash;
 
 static inline int cfi_get_offset(void)
 {
@@ -135,6 +133,8 @@ static inline int cfi_get_offset(void)
 #define cfi_get_offset cfi_get_offset
 
 extern u32 cfi_get_func_hash(void *func);
+#define cfi_get_func_hash cfi_get_func_hash
+
 extern int cfi_get_func_arity(void *func);
 
 #ifdef CONFIG_FINEIBT
@@ -153,12 +153,6 @@ static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
 {
 	return BUG_TRAP_TYPE_NONE;
 }
-#define cfi_bpf_hash 0U
-#define cfi_bpf_subprog_hash 0U
-static inline u32 cfi_get_func_hash(void *func)
-{
-	return 0;
-}
 static inline int cfi_get_func_arity(void *func)
 {
 	return 0;

arch/x86/kernel/alternative.c

@@ -1184,43 +1184,6 @@ bool cfi_bhi __ro_after_init = false;
 #endif
 
 #ifdef CONFIG_CFI_CLANG
-struct bpf_insn;
-
-/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
-extern unsigned int __bpf_prog_runX(const void *ctx,
-				    const struct bpf_insn *insn);
-KCFI_REFERENCE(__bpf_prog_runX);
-
-/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
-asm (
-"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
-"	.type	cfi_bpf_hash,@object				\n"
-"	.globl	cfi_bpf_hash					\n"
-"	.p2align	2, 0x0					\n"
-"cfi_bpf_hash:							\n"
-"	.long	__kcfi_typeid___bpf_prog_runX			\n"
-"	.size	cfi_bpf_hash, 4					\n"
-"	.popsection						\n"
-);
-
-/* Must match bpf_callback_t */
-extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
-KCFI_REFERENCE(__bpf_callback_fn);
-
-/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
-asm (
-"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
-"	.type	cfi_bpf_subprog_hash,@object			\n"
-"	.globl	cfi_bpf_subprog_hash				\n"
-"	.p2align	2, 0x0					\n"
-"cfi_bpf_subprog_hash:						\n"
-"	.long	__kcfi_typeid___bpf_callback_fn			\n"
-"	.size	cfi_bpf_subprog_hash, 4				\n"
-"	.popsection						\n"
-);
-
 u32 cfi_get_func_hash(void *func)
 {
 	u32 hash;

include/linux/bpf-cgroup.h

@@ -77,9 +77,6 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
 extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
 #define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
 
-#define for_each_cgroup_storage_type(stype) \
-	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
-
 struct bpf_cgroup_storage_map;
 
 struct bpf_storage_buffer {
@@ -510,8 +507,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
 #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
 				       kernel_optval) ({ 0; })
 
-#define for_each_cgroup_storage_type(stype) for (; false; )
-
 #endif /* CONFIG_CGROUP_BPF */
 
 #endif /* _BPF_CGROUP_H */

include/linux/bpf.h

@@ -208,6 +208,20 @@ enum btf_field_type {
 	BPF_RES_SPIN_LOCK = (1 << 12),
 };
 
+enum bpf_cgroup_storage_type {
+	BPF_CGROUP_STORAGE_SHARED,
+	BPF_CGROUP_STORAGE_PERCPU,
+	__BPF_CGROUP_STORAGE_MAX
+#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+};
+
+#ifdef CONFIG_CGROUP_BPF
+# define for_each_cgroup_storage_type(stype) \
+	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+#else
+# define for_each_cgroup_storage_type(stype) for (; false; )
+#endif /* CONFIG_CGROUP_BPF */
+
 typedef void (*btf_dtor_kfunc_t)(void *);
 
 struct btf_field_kptr {
@@ -260,6 +274,19 @@ struct bpf_list_node_kern {
 	void *owner;
 } __attribute__((aligned(8)));
 
+/* 'Ownership' of program-containing map is claimed by the first program
+ * that is going to use this map or by the first program which FD is
+ * stored in the map to make sure that all callers and callees have the
+ * same prog type, JITed flag and xdp_has_frags flag.
+ */
+struct bpf_map_owner {
+	enum bpf_prog_type type;
+	bool jited;
+	bool xdp_has_frags;
+	u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
+	const struct btf_type *attach_func_proto;
+};
+
 struct bpf_map {
 	const struct bpf_map_ops *ops;
 	struct bpf_map *inner_map_meta;
@@ -292,24 +319,15 @@ struct bpf_map {
 		struct rcu_head rcu;
 	};
 	atomic64_t writecnt;
-	/* 'Ownership' of program-containing map is claimed by the first program
-	 * that is going to use this map or by the first program which FD is
-	 * stored in the map to make sure that all callers and callees have the
-	 * same prog type, JITed flag and xdp_has_frags flag.
-	 */
-	struct {
-		const struct btf_type *attach_func_proto;
-		spinlock_t lock;
-		enum bpf_prog_type type;
-		bool jited;
-		bool xdp_has_frags;
-	} owner;
+	spinlock_t owner_lock;
+	struct bpf_map_owner *owner;
 	bool bypass_spec_v1;
 	bool frozen; /* write-once; write-protected by freeze_mutex */
 	bool free_after_mult_rcu_gp;
 	bool free_after_rcu_gp;
 	atomic64_t sleepable_refcnt;
 	s64 __percpu *elem_count;
+	u64 cookie; /* write-once */
 };
 
 static inline const char *btf_field_type_name(enum btf_field_type type)
@@ -1082,14 +1100,6 @@ struct bpf_prog_offload {
 	u32 jited_len;
 };
 
-enum bpf_cgroup_storage_type {
-	BPF_CGROUP_STORAGE_SHARED,
-	BPF_CGROUP_STORAGE_PERCPU,
-	__BPF_CGROUP_STORAGE_MAX
-};
-
-#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
-
 /* The longest tracepoint has 12 args.
  * See include/trace/bpf_probe.h
  */
@@ -2108,6 +2118,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags)
 	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
 }
 
+static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
+{
+	return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
+}
+
+static inline void bpf_map_owner_free(struct bpf_map *map)
+{
+	kfree(map->owner);
+}
+
 struct bpf_event_entry {
 	struct perf_event *event;
 	struct file *perf_file;
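
Two aspects of the new layout are worth noting: the owner metadata is now allocated lazily instead of occupying every struct bpf_map, and cgroup storage compatibility is tracked via each map's write-once u64 cookie rather than its pointer, which avoids ambiguity if a freed map's address is later reused. A condensed sketch of the locking pattern, simplified from __bpf_prog_map_compatible() in the kernel/bpf/core.c hunk further below (the real check also compares jited, xdp_has_frags, storage cookies and attach_func_proto):

static bool claim_or_check_owner(struct bpf_map *map, enum bpf_prog_type type)
{
	bool ret = false;

	spin_lock(&map->owner_lock);
	if (!map->owner) {
		/* First user claims the map; GFP_ATOMIC since we hold a spinlock. */
		map->owner = bpf_map_owner_alloc(map);
		if (map->owner) {
			map->owner->type = type;
			ret = true;
		}
	} else {
		/* Later users must match what the first one recorded. */
		ret = map->owner->type == type;
	}
	spin_unlock(&map->owner_lock);
	return ret;
}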

include/linux/cfi.h

@@ -11,16 +11,9 @@
 #include <linux/module.h>
 #include <asm/cfi.h>
 
+#ifdef CONFIG_CFI_CLANG
 extern bool cfi_warn;
 
-#ifndef cfi_get_offset
-static inline int cfi_get_offset(void)
-{
-	return 0;
-}
-#endif
-
-#ifdef CONFIG_CFI_CLANG
 enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
 				      unsigned long *target, u32 type);
@@ -29,6 +22,44 @@ static inline enum bug_trap_type report_cfi_failure_noaddr(struct pt_regs *regs,
 {
 	return report_cfi_failure(regs, addr, NULL, 0);
 }
+
+#ifndef cfi_get_offset
+/*
+ * Returns the CFI prefix offset. By default, the compiler emits only
+ * a 4-byte CFI type hash before the function. If an architecture
+ * uses -fpatchable-function-entry=N,M where M>0 to change the prefix
+ * offset, they must override this function.
+ */
+static inline int cfi_get_offset(void)
+{
+	return 4;
+}
+#endif
+
+#ifndef cfi_get_func_hash
+static inline u32 cfi_get_func_hash(void *func)
+{
+	u32 hash;
+
+	if (get_kernel_nofault(hash, func - cfi_get_offset()))
+		return 0;
+
+	return hash;
+}
+#endif
+
+/* CFI type hashes for BPF function types */
+extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
+
+#else /* CONFIG_CFI_CLANG */
+
+static inline int cfi_get_offset(void) { return 0; }
+static inline u32 cfi_get_func_hash(void *func) { return 0; }
+
+#define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
+
 #endif /* CONFIG_CFI_CLANG */
 
 #ifdef CONFIG_ARCH_USES_CFI_TRAPS
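
As an illustration of the override hook, a hypothetical asm/cfi.h for an architecture that places two 4-byte prefix instructions between the type hash and the entry point (values invented for the example; x86 keeps its own larger offset in the hunk above):

/* Hypothetical override: 4-byte hash plus two 4-byte prefix insns. */
static inline int cfi_get_offset(void)
{
	return 4 + 2 * 4;
}
#define cfi_get_offset cfi_get_offset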

include/linux/cfi_types.h

@@ -41,5 +41,28 @@
 SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
 #endif
 
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_CFI_CLANG
+#define DEFINE_CFI_TYPE(name, func)						\
+	/*									\
+	 * Force a reference to the function so the compiler generates		\
+	 * __kcfi_typeid_<func>.						\
+	 */									\
+	__ADDRESSABLE(func);							\
+	/* u32 name __ro_after_init = __kcfi_typeid_<func> */			\
+	extern u32 name;							\
+	asm (									\
+	"	.pushsection	.data..ro_after_init,\"aw\",\%progbits	\n"	\
+	"	.type	" #name ",\%object				\n"	\
+	"	.globl	" #name "					\n"	\
+	"	.p2align	2, 0x0					\n"	\
+	#name ":							\n"	\
+	"	.4byte	__kcfi_typeid_" #func "				\n"	\
+	"	.size	" #name ", 4					\n"	\
+	"	.popsection						\n"	\
+	);
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _LINUX_CFI_TYPES_H */
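
For reference, invoking DEFINE_CFI_TYPE(cfi_bpf_hash, __bpf_prog_runX), as the kernel/cfi.c hunk below does, expands to roughly the following (a sketch; the \% escapes become plain % after expansion):

__ADDRESSABLE(__bpf_prog_runX);	/* forces __kcfi_typeid___bpf_prog_runX to be emitted */
extern u32 cfi_bpf_hash;
asm (
"	.pushsection	.data..ro_after_init,\"aw\",%progbits	\n"
"	.type	cfi_bpf_hash,%object				\n"
"	.globl	cfi_bpf_hash					\n"
"	.p2align	2, 0x0					\n"
"cfi_bpf_hash:							\n"
"	.4byte	__kcfi_typeid___bpf_prog_runX			\n"
"	.size	cfi_bpf_hash, 4					\n"
"	.popsection						\n"
);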

kernel/bpf/core.c

@@ -2377,28 +2377,44 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
 				      const struct bpf_prog *fp)
 {
 	enum bpf_prog_type prog_type = resolve_prog_type(fp);
-	bool ret;
 	struct bpf_prog_aux *aux = fp->aux;
+	enum bpf_cgroup_storage_type i;
+	bool ret = false;
+	u64 cookie;
 
 	if (fp->kprobe_override)
-		return false;
+		return ret;
 
-	spin_lock(&map->owner.lock);
-	if (!map->owner.type) {
-		/* There's no owner yet where we could check for
-		 * compatibility.
-		 */
-		map->owner.type = prog_type;
-		map->owner.jited = fp->jited;
-		map->owner.xdp_has_frags = aux->xdp_has_frags;
-		map->owner.attach_func_proto = aux->attach_func_proto;
+	spin_lock(&map->owner_lock);
+	/* There's no owner yet where we could check for compatibility. */
+	if (!map->owner) {
+		map->owner = bpf_map_owner_alloc(map);
+		if (!map->owner)
+			goto err;
+		map->owner->type = prog_type;
+		map->owner->jited = fp->jited;
+		map->owner->xdp_has_frags = aux->xdp_has_frags;
+		map->owner->attach_func_proto = aux->attach_func_proto;
+		for_each_cgroup_storage_type(i) {
+			map->owner->storage_cookie[i] =
+				aux->cgroup_storage[i] ?
+				aux->cgroup_storage[i]->cookie : 0;
+		}
 		ret = true;
 	} else {
-		ret = map->owner.type == prog_type &&
-		      map->owner.jited == fp->jited &&
-		      map->owner.xdp_has_frags == aux->xdp_has_frags;
+		ret = map->owner->type == prog_type &&
+		      map->owner->jited == fp->jited &&
+		      map->owner->xdp_has_frags == aux->xdp_has_frags;
+		for_each_cgroup_storage_type(i) {
+			if (!ret)
+				break;
+			cookie = aux->cgroup_storage[i] ?
+				 aux->cgroup_storage[i]->cookie : 0;
+			ret = map->owner->storage_cookie[i] == cookie ||
+			      !cookie;
+		}
 		if (ret &&
-		    map->owner.attach_func_proto != aux->attach_func_proto) {
+		    map->owner->attach_func_proto != aux->attach_func_proto) {
 			switch (prog_type) {
 			case BPF_PROG_TYPE_TRACING:
 			case BPF_PROG_TYPE_LSM:
@@ -2411,8 +2427,8 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
 			}
 		}
 	}
-	spin_unlock(&map->owner.lock);
-
+err:
+	spin_unlock(&map->owner_lock);
 	return ret;
 }

kernel/bpf/syscall.c

@@ -37,6 +37,7 @@
 #include <linux/trace_events.h>
 #include <linux/tracepoint.h>
 #include <linux/overflow.h>
+#include <linux/cookie.h>
 
 #include <net/netfilter/nf_bpf_link.h>
 #include <net/netkit.h>
@@ -53,6 +54,7 @@
 #define BPF_OBJ_FLAG_MASK	(BPF_F_RDONLY | BPF_F_WRONLY)
 
 DEFINE_PER_CPU(int, bpf_prog_active);
+DEFINE_COOKIE(bpf_map_cookie);
 static DEFINE_IDR(prog_idr);
 static DEFINE_SPINLOCK(prog_idr_lock);
 static DEFINE_IDR(map_idr);
@@ -885,6 +887,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
 
 	security_bpf_map_free(map);
 	bpf_map_release_memcg(map);
+	bpf_map_owner_free(map);
 	bpf_map_free(map);
 }
 
@@ -979,12 +982,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 	struct bpf_map *map = filp->private_data;
 	u32 type = 0, jited = 0;
 
-	if (map_type_contains_progs(map)) {
-		spin_lock(&map->owner.lock);
-		type = map->owner.type;
-		jited = map->owner.jited;
-		spin_unlock(&map->owner.lock);
+	spin_lock(&map->owner_lock);
+	if (map->owner) {
+		type = map->owner->type;
+		jited = map->owner->jited;
 	}
+	spin_unlock(&map->owner_lock);
 
 	seq_printf(m,
 		   "map_type:\t%u\n"
@@ -1487,10 +1490,14 @@ static int map_create(union bpf_attr *attr, bool kernel)
 	if (err < 0)
 		goto free_map;
 
+	preempt_disable();
+	map->cookie = gen_cookie_next(&bpf_map_cookie);
+	preempt_enable();
+
 	atomic64_set(&map->refcnt, 1);
 	atomic64_set(&map->usercnt, 1);
 	mutex_init(&map->freeze_mutex);
-	spin_lock_init(&map->owner.lock);
+	spin_lock_init(&map->owner_lock);
 
 	if (attr->btf_key_type_id || attr->btf_value_type_id ||
 	    /* Even the map's value is a kernel's struct,
kernel/bpf/verifier.c

@@ -21445,7 +21445,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 					      &target_size);
 		if (cnt == 0 || cnt >= INSN_BUF_SIZE ||
 		    (ctx_field_size && !target_size)) {
-			verifier_bug(env, "error during ctx access conversion");
+			verifier_bug(env, "error during ctx access conversion (%d)", cnt);
 			return -EFAULT;
 		}

kernel/cfi.c

@@ -5,6 +5,8 @@
  * Copyright (C) 2022 Google LLC
  */
 
+#include <linux/bpf.h>
+#include <linux/cfi_types.h>
 #include <linux/cfi.h>
 
 bool cfi_warn __ro_after_init = IS_ENABLED(CONFIG_CFI_PERMISSIVE);
@@ -27,6 +29,19 @@ enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
 	return BUG_TRAP_TYPE_BUG;
 }
 
+/*
+ * Declare two non-existent functions with types that match bpf_func_t and
+ * bpf_callback_t pointers, and use DEFINE_CFI_TYPE to define type hash
+ * variables for each function type. The cfi_bpf_* variables are used by
+ * arch-specific BPF JIT implementations to ensure indirectly callable JIT
+ * code has matching CFI type hashes.
+ */
+extern typeof(*(bpf_func_t)0) __bpf_prog_runX;
+DEFINE_CFI_TYPE(cfi_bpf_hash, __bpf_prog_runX);
+
+extern typeof(*(bpf_callback_t)0) __bpf_callback_fn;
+DEFINE_CFI_TYPE(cfi_bpf_subprog_hash, __bpf_callback_fn);
+
 #ifdef CONFIG_ARCH_USES_CFI_TRAPS
 static inline unsigned long trap_address(s32 *p)
 {
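
The extern declarations use a GNU C idiom: dereferencing a null bpf_func_t inside typeof() yields the function type the pointer points to, so the declaration picks up the exact prototype without restating it. Spelled out, the first declaration is equivalent to the one the riscv and x86 hunks above deleted:

/* extern typeof(*(bpf_func_t)0) __bpf_prog_runX;  is the same as: */
extern unsigned int __bpf_prog_runX(const void *ctx,
				    const struct bpf_insn *insn);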

net/core/filter.c

@@ -9458,6 +9458,9 @@ static bool flow_dissector_is_valid_access(int off, int size,
 	if (off < 0 || off >= sizeof(struct __sk_buff))
 		return false;
 
+	if (off % size != 0)
+		return false;
+
 	if (type == BPF_WRITE)
 		return false;
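
The new check requires a ctx load's offset to be a multiple of its size, which is what convert_ctx_accesses() assumes when it rewrites the access. Illustrative BPF assembly, in the notation the selftest at the end of this series uses (struct __sk_buff offsets, 4-byte loads):

r1 = *(u32 *)(r1 + offsetof(struct __sk_buff, data));     /* accepted: off % 4 == 0 */
r1 = *(u32 *)(r1 + offsetof(struct __sk_buff, data) + 1); /* now rejected: "invalid bpf_context access" */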

net/netfilter/nf_bpf_link.c

@@ -296,6 +296,9 @@ static bool nf_is_valid_access(int off, int size, enum bpf_access_type type,
 	if (off < 0 || off >= sizeof(struct bpf_nf_ctx))
 		return false;
 
+	if (off % size != 0)
+		return false;
+
 	if (type == BPF_WRITE)
 		return false;

tools/lib/bpf/libbpf.c

@@ -10096,7 +10096,7 @@ static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
 				 enum bpf_attach_type attach_type,
 				 int *btf_obj_fd, int *btf_type_id)
 {
-	int ret, i, mod_len;
+	int ret, i, mod_len = 0;
 	const char *fn_name, *mod_name = NULL;
 
 	fn_name = strchr(attach_name, ':');

tools/testing/selftests/bpf/progs/verifier_ctx.c

@@ -1,10 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Converted from tools/testing/selftests/bpf/verifier/ctx.c */
 
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include "bpf_misc.h"
 
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+
 SEC("tc")
 __description("context stores via BPF_ATOMIC")
 __failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed")
@@ -243,4 +245,23 @@ narrow_load("sockops", bpf_sock_ops, skb_data);
 narrow_load("sockops", bpf_sock_ops, skb_data_end);
 narrow_load("sockops", bpf_sock_ops, skb_hwtstamp);
 
+#define unaligned_access(type, ctx, field)					\
+	SEC(type)								\
+	__description("unaligned access on field " #field " of " #ctx)		\
+	__failure __msg("invalid bpf_context access")				\
+	__naked void unaligned_ctx_access_##ctx##field(void)			\
+	{									\
+		asm volatile ("							\
+		r1 = *(u%[size] *)(r1 + %[off]);				\
+		r0 = 0;								\
+		exit;"								\
+		:								\
+		: __imm_const(size, sizeof_field(struct ctx, field) * 8),	\
+		  __imm_const(off, offsetof(struct ctx, field) + 1)		\
+		: __clobber_all);						\
+	}
+
+unaligned_access("flow_dissector", __sk_buff, data);
+unaligned_access("netfilter", bpf_nf_ctx, skb);
+
 char _license[] SEC("license") = "GPL";