diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c1bd1bd10506..942f9ac9fa7b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -266,6 +266,13 @@ static inline bool map_value_has_kptrs(const struct bpf_map *map)
 	return !IS_ERR_OR_NULL(map->kptr_off_tab);
 }
 
+/* 'dst' must be a temporary buffer and should not point to memory that is
+ * being used in parallel by a bpf program or bpf syscall, otherwise the
+ * access from the bpf program or bpf syscall may be corrupted by the
+ * reinitialization, leading to weird problems. Even if 'dst' is newly
+ * allocated from the bpf memory allocator, it is still possible for 'dst'
+ * to be used in parallel by a bpf program or bpf syscall.
+ */
 static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
 {
 	if (unlikely(map_value_has_spin_lock(map)))
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index c4811984fafa..4a3d0a744702 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1010,8 +1010,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			l_new = ERR_PTR(-ENOMEM);
 			goto dec_count;
 		}
-		check_and_init_map_value(&htab->map,
-					 l_new->key + round_up(key_size, 8));
 	}
 
 	memcpy(l_new->key, key, key_size);
@@ -1603,6 +1601,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 		else
 			copy_map_value(map, value, l->key +
 				       roundup_key_size);
+		/* Zeroing special fields in the temp buffer */
 		check_and_init_map_value(map, value);
 	}
 
@@ -1803,6 +1802,7 @@ again_nocopy:
 						      true);
 			else
 				copy_map_value(map, dst_val, value);
+			/* Zeroing special fields in the temp buffer */
 			check_and_init_map_value(map, dst_val);
 		}
 		if (do_delete) {
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 6187c28d266f..ace303a220ae 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -143,7 +143,7 @@ static void *__alloc(struct bpf_mem_cache *c, int node)
 		return obj;
 	}
 
-	return kmalloc_node(c->unit_size, flags, node);
+	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
 }
 
 static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
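
For reference, a minimal stand-alone C sketch of the pattern the hashtab hunks rely on: copy the live value into a caller-private buffer, then reinitialize the special fields only in that buffer, never in the live element that a bpf program may still be touching. All names below (toy_map_value, lookup_into_temp) are hypothetical and only mimic the copy_map_value() + check_and_init_map_value() sequence shown in the diff.

#include <stdio.h>
#include <string.h>

/* Toy stand-in for a map value: "special" fields (lock, pointer)
 * followed by ordinary payload.
 */
struct toy_map_value {
	long lock;	/* stands in for a bpf_spin_lock field */
	void *kptr;	/* stands in for a kptr field */
	long data;	/* ordinary data the caller wants to read */
};

/* Mimics the lookup-and-delete path: copy the live element into the
 * caller-private 'dst', then zero the special fields in 'dst' only,
 * so a reader of the live element never sees a half-reinitialized value.
 */
static void lookup_into_temp(const struct toy_map_value *live,
			     struct toy_map_value *dst)
{
	memcpy(dst, live, sizeof(*dst));	/* copy_map_value() analogue */
	dst->lock = 0;				/* check_and_init_map_value() analogue: */
	dst->kptr = NULL;			/* zero special fields in the temp buffer */
}

int main(void)
{
	struct toy_map_value live = { .lock = 1, .kptr = &live, .data = 42 };
	struct toy_map_value tmp;

	lookup_into_temp(&live, &tmp);
	printf("data=%ld lock=%ld kptr=%p\n", tmp.data, tmp.lock, tmp.kptr);
	return 0;
}

Presumably the dropped call in alloc_htab_elem() pairs with the __GFP_ZERO change in __alloc(): the object is zeroed at slab-allocation time instead, since, as the new bpf.h comment notes, even a freshly allocated element from the bpf memory allocator may already be visible to a bpf program or bpf syscall.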