From 8e369c77040c137cf5303b3e606265fcaa8bc791 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Wed, 14 Jun 2023 16:16:09 +0000
Subject: [PATCH] Revert "bpf, sockmap: Convert schedule_work into delayed_work"

This reverts commit 9f4d7efb33453a009d1df098a885b012ce93da2d.

It breaks the Android KABI and will be brought back at a later time
when it is safe to do so.

Bug: 161946584
Change-Id: Ic3e8a533b0958aea3b2f58af8aa8292377e78ace
Signed-off-by: Greg Kroah-Hartman
---
 include/linux/skmsg.h |  2 +-
 net/core/skmsg.c      | 21 +++++++--------------
 net/core/sock_map.c   |  3 +--
 3 files changed, 9 insertions(+), 17 deletions(-)

diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 904ff9a32ad6..84f787416a54 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -105,7 +105,7 @@ struct sk_psock {
 	struct proto			*sk_proto;
 	struct mutex			work_mutex;
 	struct sk_psock_work_state	work_state;
-	struct delayed_work		work;
+	struct work_struct		work;
 	struct rcu_work			rwork;
 };
 
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 6a9b794861f3..2b6d9519ff29 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -481,7 +481,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 	}
 out:
 	if (psock->work_state.skb && copied > 0)
-		schedule_delayed_work(&psock->work, 0);
+		schedule_work(&psock->work);
 	return copied;
 }
 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
@@ -639,8 +639,7 @@ static void sk_psock_skb_state(struct sk_psock *psock,
 
 static void sk_psock_backlog(struct work_struct *work)
 {
-	struct delayed_work *dwork = to_delayed_work(work);
-	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
+	struct sk_psock *psock = container_of(work, struct sk_psock, work);
 	struct sk_psock_work_state *state = &psock->work_state;
 	struct sk_buff *skb = NULL;
 	bool ingress;
@@ -680,12 +679,6 @@ start:
 				if (ret == -EAGAIN) {
 					sk_psock_skb_state(psock, state, skb,
 							   len, off);
-
-					/* Delay slightly to prioritize any
-					 * other work that might be here.
-					 */
-					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
-						schedule_delayed_work(&psock->work, 1);
 					goto end;
 				}
 				/* Hard errors break pipe and stop xmit. */
@@ -740,7 +733,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
 	INIT_LIST_HEAD(&psock->link);
 	spin_lock_init(&psock->link_lock);
 
-	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
+	INIT_WORK(&psock->work, sk_psock_backlog);
 	mutex_init(&psock->work_mutex);
 	INIT_LIST_HEAD(&psock->ingress_msg);
 	spin_lock_init(&psock->ingress_lock);
@@ -829,7 +822,7 @@ static void sk_psock_destroy(struct work_struct *work)
 
 	sk_psock_done_strp(psock);
 
-	cancel_delayed_work_sync(&psock->work);
+	cancel_work_sync(&psock->work);
 	mutex_destroy(&psock->work_mutex);
 
 	psock_progs_drop(&psock->progs);
@@ -944,7 +937,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
 	}
 
 	skb_queue_tail(&psock_other->ingress_skb, skb);
-	schedule_delayed_work(&psock_other->work, 0);
+	schedule_work(&psock_other->work);
 	spin_unlock_bh(&psock_other->ingress_lock);
 	return 0;
 }
@@ -1024,7 +1017,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
 		spin_lock_bh(&psock->ingress_lock);
 		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 			skb_queue_tail(&psock->ingress_skb, skb);
-			schedule_delayed_work(&psock->work, 0);
+			schedule_work(&psock->work);
 			err = 0;
 		}
 		spin_unlock_bh(&psock->ingress_lock);
@@ -1055,7 +1048,7 @@ static void sk_psock_write_space(struct sock *sk)
 	psock = sk_psock(sk);
 	if (likely(psock)) {
 		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
-			schedule_delayed_work(&psock->work, 0);
+			schedule_work(&psock->work);
 		write_space = psock->saved_write_space;
 	}
 	rcu_read_unlock();
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index d38267201892..a68a7290a3b2 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -1624,10 +1624,9 @@ void sock_map_close(struct sock *sk, long timeout)
 		rcu_read_unlock();
 		sk_psock_stop(psock);
 		release_sock(sk);
-		cancel_delayed_work_sync(&psock->work);
+		cancel_work_sync(&psock->work);
 		sk_psock_put(sk, psock);
 	}
-
 	/* Make sure we do not recurse. This is a bug.
 	 * Leak the socket instead of crashing on a stack overflow.
 	 */