bpf: fix a memory leak in the LRU and LRU_PERCPU hash maps
commit b34ffb0c6d23583830f9327864b9c1f486003305 upstream.
The LRU and LRU_PERCPU maps allocate a new element on update before locking the
target hash table bucket. Right after that the maps try to lock the bucket.
If this fails, then maps return -EBUSY to the caller without releasing the
allocated element. This makes the element untracked: it doesn't belong to
either of free lists, and it doesn't belong to the hash table, so can't be
re-used; this eventually leads to the permanent -ENOMEM on LRU map updates,
which is unexpected. Fix this by returning the element to the local free list
if bucket locking fails.
Fixes: 20b6cc34ea74 ("bpf: Avoid hashtab deadlock with map_locked")
Signed-off-by: Anton Protopopov <aspsk@isovalent.com>
Link: https://lore.kernel.org/r/20230522154558.2166815-1-aspsk@isovalent.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
177ee41f61
commit
1a9e80f757
1 changed file with 4 additions and 2 deletions
|
|
@@ -1203,7 +1203,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 
 	ret = htab_lock_bucket(htab, b, hash, &flags);
 	if (ret)
-		return ret;
+		goto err_lock_bucket;
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1224,6 +1224,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 err:
 	htab_unlock_bucket(htab, b, hash, flags);
 
+err_lock_bucket:
 	if (ret)
 		htab_lru_push_free(htab, l_new);
 	else if (l_old)
@@ -1326,7 +1327,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 
 	ret = htab_lock_bucket(htab, b, hash, &flags);
 	if (ret)
-		return ret;
+		goto err_lock_bucket;
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1349,6 +1350,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 	ret = 0;
 err:
 	htab_unlock_bucket(htab, b, hash, flags);
 
+err_lock_bucket:
 	if (l_new)
 		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
 	return ret;
Loading…
Add table
Add a link
Reference in a new issue