diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 1f3b89c885cc..a2b374372363 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -51,7 +51,8 @@ struct vhost_vsock {
 	struct hlist_node hash;
 
 	struct vhost_work send_pkt_work;
-	struct sk_buff_head send_pkt_queue; /* host->guest pending packets */
+	spinlock_t send_pkt_list_lock;
+	struct list_head send_pkt_list;	/* host->guest pending packets */
 
 	atomic_t queued_replies;
@@ -107,31 +108,40 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 	vhost_disable_notify(&vsock->dev, vq);
 
 	do {
-		struct virtio_vsock_hdr *hdr;
-		size_t iov_len, payload_len;
+		struct virtio_vsock_pkt *pkt;
 		struct iov_iter iov_iter;
-		u32 flags_to_restore = 0;
-		struct sk_buff *skb;
 		unsigned out, in;
 		size_t nbytes;
+		size_t iov_len, payload_len;
 		int head;
+		u32 flags_to_restore = 0;
 
-		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
-
-		if (!skb) {
+		spin_lock_bh(&vsock->send_pkt_list_lock);
+		if (list_empty(&vsock->send_pkt_list)) {
+			spin_unlock_bh(&vsock->send_pkt_list_lock);
 			vhost_enable_notify(&vsock->dev, vq);
 			break;
 		}
 
+		pkt = list_first_entry(&vsock->send_pkt_list,
+				       struct virtio_vsock_pkt, list);
+		list_del_init(&pkt->list);
+		spin_unlock_bh(&vsock->send_pkt_list_lock);
+
 		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 					 &out, &in, NULL, NULL);
 		if (head < 0) {
-			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+			spin_lock_bh(&vsock->send_pkt_list_lock);
+			list_add(&pkt->list, &vsock->send_pkt_list);
+			spin_unlock_bh(&vsock->send_pkt_list_lock);
 			break;
 		}
 
 		if (head == vq->num) {
-			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+			spin_lock_bh(&vsock->send_pkt_list_lock);
+			list_add(&pkt->list, &vsock->send_pkt_list);
+			spin_unlock_bh(&vsock->send_pkt_list_lock);
+
 			/* We cannot finish yet if more buffers snuck in while
 			 * re-enabling notify.
 			 */
@@ -143,27 +153,26 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 		}
 
 		if (out) {
-			kfree_skb(skb);
+			virtio_transport_free_pkt(pkt);
 			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
 			break;
 		}
 
 		iov_len = iov_length(&vq->iov[out], in);
-		if (iov_len < sizeof(*hdr)) {
-			kfree_skb(skb);
+		if (iov_len < sizeof(pkt->hdr)) {
+			virtio_transport_free_pkt(pkt);
 			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
 			break;
 		}
 
 		iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
-		payload_len = skb->len;
-		hdr = virtio_vsock_hdr(skb);
+		payload_len = pkt->len - pkt->off;
 
 		/* If the packet is greater than the space available in the
 		 * buffer, we split it using multiple buffers.
 		 */
-		if (payload_len > iov_len - sizeof(*hdr)) {
-			payload_len = iov_len - sizeof(*hdr);
+		if (payload_len > iov_len - sizeof(pkt->hdr)) {
+			payload_len = iov_len - sizeof(pkt->hdr);
 
 			/* As we are copying pieces of large packet's buffer to
 			 * small rx buffers, headers of packets in rx queue are
@@ -176,30 +185,31 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 			 * bits set. After initialized header will be copied to
 			 * rx buffer, these required bits will be restored.
 			 */
-			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
-				hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
+				pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
 				flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
 
-				if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
-					hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+				if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+					pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
 					flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
 				}
 			}
 		}
 
 		/* Set the correct length in the header */
-		hdr->len = cpu_to_le32(payload_len);
+		pkt->hdr.len = cpu_to_le32(payload_len);
 
-		nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
-		if (nbytes != sizeof(*hdr)) {
-			kfree_skb(skb);
+		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+		if (nbytes != sizeof(pkt->hdr)) {
+			virtio_transport_free_pkt(pkt);
 			vq_err(vq, "Faulted on copying pkt hdr\n");
 			break;
 		}
 
-		nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
+		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
+				      &iov_iter);
 		if (nbytes != payload_len) {
-			kfree_skb(skb);
+			virtio_transport_free_pkt(pkt);
 			vq_err(vq, "Faulted on copying pkt buf\n");
 			break;
 		}
@@ -207,28 +217,31 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 		/* Deliver to monitoring devices all packets that we
 		 * will transmit.
 		 */
-		virtio_transport_deliver_tap_pkt(skb);
+		virtio_transport_deliver_tap_pkt(pkt);
 
-		vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
+		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
 		added = true;
 
-		skb_pull(skb, payload_len);
+		pkt->off += payload_len;
 		total_len += payload_len;
 
 		/* If we didn't send all the payload we can requeue the packet
 		 * to send it with the next available buffer.
 		 */
-		if (skb->len > 0) {
-			hdr->flags |= cpu_to_le32(flags_to_restore);
+		if (pkt->off < pkt->len) {
+			pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
 
-			/* We are queueing the same skb to handle
+			/* We are queueing the same virtio_vsock_pkt to handle
 			 * the remaining bytes, and we want to deliver it
 			 * to monitoring devices in the next iteration.
 			 */
-			virtio_vsock_skb_clear_tap_delivered(skb);
-			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+			pkt->tap_delivered = false;
+
+			spin_lock_bh(&vsock->send_pkt_list_lock);
+			list_add(&pkt->list, &vsock->send_pkt_list);
+			spin_unlock_bh(&vsock->send_pkt_list_lock);
 		} else {
-			if (virtio_vsock_skb_reply(skb)) {
+			if (pkt->reply) {
 				int val;
 
 				val = atomic_dec_return(&vsock->queued_replies);
@@ -240,7 +253,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 				restart_tx = true;
 			}
 
-			consume_skb(skb);
+			virtio_transport_free_pkt(pkt);
 		}
 	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 
 	if (added)
@@ -265,26 +278,28 @@ static void vhost_transport_send_pkt_work(struct vhost_work *work)
 }
 
 static int
-vhost_transport_send_pkt(struct sk_buff *skb)
+vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
 {
-	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
 	struct vhost_vsock *vsock;
-	int len = skb->len;
+	int len = pkt->len;
 
 	rcu_read_lock();
 
 	/* Find the vhost_vsock according to guest context id */
-	vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
+	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
 	if (!vsock) {
 		rcu_read_unlock();
-		kfree_skb(skb);
+		virtio_transport_free_pkt(pkt);
 		return -ENODEV;
 	}
 
-	if (virtio_vsock_skb_reply(skb))
+	if (pkt->reply)
 		atomic_inc(&vsock->queued_replies);
 
-	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
+	spin_lock_bh(&vsock->send_pkt_list_lock);
+	list_add_tail(&pkt->list, &vsock->send_pkt_list);
+	spin_unlock_bh(&vsock->send_pkt_list_lock);
+
 	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
 
 	rcu_read_unlock();
@@ -295,8 +310,10 @@ static int
 vhost_transport_cancel_pkt(struct vsock_sock *vsk)
 {
 	struct vhost_vsock *vsock;
+	struct virtio_vsock_pkt *pkt, *n;
 	int cnt = 0;
 	int ret = -ENODEV;
+	LIST_HEAD(freeme);
 
 	rcu_read_lock();
@@ -305,7 +322,20 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
 	if (!vsock)
 		goto out;
 
-	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
+	spin_lock_bh(&vsock->send_pkt_list_lock);
+	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+		if (pkt->vsk != vsk)
+			continue;
+		list_move(&pkt->list, &freeme);
+	}
+	spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+	list_for_each_entry_safe(pkt, n, &freeme, list) {
+		if (pkt->reply)
+			cnt++;
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
 
 	if (cnt) {
 		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
@@ -322,14 +352,12 @@ out:
 	return ret;
 }
 
-static struct sk_buff *
-vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
+static struct virtio_vsock_pkt *
+vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
 		      unsigned int out, unsigned int in)
 {
-	struct virtio_vsock_hdr *hdr;
+	struct virtio_vsock_pkt *pkt;
 	struct iov_iter iov_iter;
-	struct sk_buff *skb;
-	size_t payload_len;
 	size_t nbytes;
 	size_t len;
@@ -338,48 +366,50 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
 		return NULL;
 	}
 
-	len = iov_length(vq->iov, out);
-
-	/* len contains both payload and hdr */
-	skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
-	if (!skb)
+	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+	if (!pkt)
 		return NULL;
 
+	len = iov_length(vq->iov, out);
 	iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);
 
-	hdr = virtio_vsock_hdr(skb);
-	nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
-	if (nbytes != sizeof(*hdr)) {
+	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+	if (nbytes != sizeof(pkt->hdr)) {
 		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
-		       sizeof(*hdr), nbytes);
-		kfree_skb(skb);
+		       sizeof(pkt->hdr), nbytes);
+		kfree(pkt);
 		return NULL;
 	}
 
-	payload_len = le32_to_cpu(hdr->len);
+	pkt->len = le32_to_cpu(pkt->hdr.len);
 
 	/* No payload */
-	if (!payload_len)
-		return skb;
+	if (!pkt->len)
+		return pkt;
 
-	/* The pkt is too big or the length in the header is invalid */
-	if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
-	    payload_len + sizeof(*hdr) > len) {
-		kfree_skb(skb);
+	/* The pkt is too big */
+	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
+		kfree(pkt);
 		return NULL;
 	}
 
-	virtio_vsock_skb_rx_put(skb);
-
-	nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
-	if (nbytes != payload_len) {
-		vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
-		       payload_len, nbytes);
-		kfree_skb(skb);
+	pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
+	if (!pkt->buf) {
+		kfree(pkt);
 		return NULL;
 	}
 
-	return skb;
+	pkt->buf_len = pkt->len;
+
+	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
+	if (nbytes != pkt->len) {
+		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
+		       pkt->len, nbytes);
+		virtio_transport_free_pkt(pkt);
+		return NULL;
+	}
+
+	return pkt;
 }
 
 /* Is there space left for replies to rx packets? */
@@ -466,9 +496,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 						  poll.work);
 	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
 						 dev);
+	struct virtio_vsock_pkt *pkt;
 	int head, pkts = 0, total_len = 0;
 	unsigned int out, in;
-	struct sk_buff *skb;
 	bool added = false;
 
 	mutex_lock(&vq->mutex);
@@ -481,8 +511,6 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 	vhost_disable_notify(&vsock->dev, vq);
 
 	do {
-		struct virtio_vsock_hdr *hdr;
-
 		if (!vhost_vsock_more_replies(vsock)) {
 			/* Stop tx until the device processes already
 			 * pending replies. Leave tx virtqueue
@@ -504,26 +532,24 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 			break;
 		}
 
-		skb = vhost_vsock_alloc_skb(vq, out, in);
-		if (!skb) {
+		pkt = vhost_vsock_alloc_pkt(vq, out, in);
+		if (!pkt) {
 			vq_err(vq, "Faulted on pkt\n");
 			continue;
 		}
 
-		total_len += sizeof(*hdr) + skb->len;
+		total_len += sizeof(pkt->hdr) + pkt->len;
 
 		/* Deliver to monitoring devices all received packets */
-		virtio_transport_deliver_tap_pkt(skb);
-
-		hdr = virtio_vsock_hdr(skb);
+		virtio_transport_deliver_tap_pkt(pkt);
 
 		/* Only accept correctly addressed packets */
-		if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
-		    le64_to_cpu(hdr->dst_cid) ==
+		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
+		    le64_to_cpu(pkt->hdr.dst_cid) ==
 		    vhost_transport_get_local_cid())
-			virtio_transport_recv_pkt(&vhost_transport, skb);
+			virtio_transport_recv_pkt(&vhost_transport, pkt);
 		else
-			kfree_skb(skb);
+			virtio_transport_free_pkt(pkt);
 
 		vhost_add_used(vq, head, 0);
 		added = true;
@@ -667,7 +693,8 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
 		       VHOST_VSOCK_WEIGHT, true, NULL);
 
 	file->private_data = vsock;
-	skb_queue_head_init(&vsock->send_pkt_queue);
+	spin_lock_init(&vsock->send_pkt_list_lock);
+	INIT_LIST_HEAD(&vsock->send_pkt_list);
 	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
 	return 0;
@@ -733,7 +760,16 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
 	vhost_vsock_flush(vsock);
 	vhost_dev_stop(&vsock->dev);
 
-	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
+	spin_lock_bh(&vsock->send_pkt_list_lock);
+	while (!list_empty(&vsock->send_pkt_list)) {
+		struct virtio_vsock_pkt *pkt;
+
+		pkt = list_first_entry(&vsock->send_pkt_list,
+				       struct virtio_vsock_pkt, list);
+		list_del_init(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+	spin_unlock_bh(&vsock->send_pkt_list_lock);
 
 	vhost_dev_cleanup(&vsock->dev);
 	kfree(vsock->dev.vqs);
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 79d6f6eabd99..de584a776908 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -7,109 +7,6 @@
 #include <net/sock.h>
 #include <net/af_vsock.h>
 
-#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))
-
-struct virtio_vsock_skb_cb {
-	bool reply;
-	bool tap_delivered;
-};
-
-#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
-
-static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
-{
-	return (struct virtio_vsock_hdr *)skb->head;
-}
-
-static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
-{
-	return VIRTIO_VSOCK_SKB_CB(skb)->reply;
-}
-
-static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
-{
-	VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
-}
-
-static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
-{
-	return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
-}
-
-static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
-{
-	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
-}
-
-static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
-{
-	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
-}
-
-static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
-{
-	u32 len;
-
-	len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
-
-	if (len > 0)
-		skb_put(skb, len);
-}
-
-static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
-{
-	struct sk_buff *skb;
-
-	if (size < VIRTIO_VSOCK_SKB_HEADROOM)
-		return NULL;
-
-	skb = alloc_skb(size, mask);
-	if (!skb)
-		return NULL;
-
-	skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
-	return skb;
-}
-
-static inline void
-virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
-{
-	spin_lock_bh(&list->lock);
-	__skb_queue_head(list, skb);
-	spin_unlock_bh(&list->lock);
-}
-
-static inline void
-virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
-{
-	spin_lock_bh(&list->lock);
-	__skb_queue_tail(list, skb);
-	spin_unlock_bh(&list->lock);
-}
-
-static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
-{
-	struct sk_buff *skb;
-
-	spin_lock_bh(&list->lock);
-	skb = __skb_dequeue(list);
-	spin_unlock_bh(&list->lock);
-
-	return skb;
-}
-
-static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
-{
-	spin_lock_bh(&list->lock);
-	__skb_queue_purge(list);
-	spin_unlock_bh(&list->lock);
-}
-
-static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
-{
-	return (size_t)(skb_end_pointer(skb) - skb->head);
-}
-
 #define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE	(1024 * 4)
 #define VIRTIO_VSOCK_MAX_BUF_SIZE		0xFFFFFFFFUL
 #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE		virtio_transport_max_vsock_pkt_buf_size
@@ -139,10 +36,23 @@ struct virtio_vsock_sock {
 	u32 last_fwd_cnt;
 	u32 rx_bytes;
 	u32 buf_alloc;
-	struct sk_buff_head rx_queue;
+	struct list_head rx_queue;
 	u32 msg_count;
 };
 
+struct virtio_vsock_pkt {
+	struct virtio_vsock_hdr	hdr;
+	struct list_head list;
+	/* socket refcnt not held, only use for cancellation */
+	struct vsock_sock *vsk;
+	void *buf;
+	u32 buf_len;
+	u32 len;
+	u32 off;
+	bool reply;
+	bool tap_delivered;
+};
+
 struct virtio_vsock_pkt_info {
 	u32 remote_cid, remote_port;
 	struct vsock_sock *vsk;
@@ -159,7 +69,7 @@ struct virtio_transport {
 	struct vsock_transport transport;
 
 	/* Takes ownership of the packet */
-	int (*send_pkt)(struct sk_buff *skb);
+	int (*send_pkt)(struct virtio_vsock_pkt *pkt);
 };
 
 ssize_t
@@ -240,10 +150,11 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
 void virtio_transport_destruct(struct vsock_sock *vsk);
 
 void virtio_transport_recv_pkt(struct virtio_transport *t,
-			       struct sk_buff *skb);
-void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
+			       struct virtio_vsock_pkt *pkt);
+void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
+void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
 u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
 void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
-void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
-int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
+void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt);
+
 #endif /* _LINUX_VIRTIO_VSOCK_H */
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 16575ea83659..460e7fbb42da 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -42,7 +42,8 @@ struct virtio_vsock {
 	bool tx_run;
 
 	struct work_struct send_pkt_work;
-	struct sk_buff_head send_pkt_queue;
+	spinlock_t send_pkt_list_lock;
+	struct list_head send_pkt_list;
 
 	atomic_t queued_replies;
@@ -100,31 +101,41 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 	vq = vsock->vqs[VSOCK_VQ_TX];
 
 	for (;;) {
+		struct virtio_vsock_pkt *pkt;
 		struct scatterlist hdr, buf, *sgs[2];
 		int ret, in_sg = 0, out_sg = 0;
-		struct sk_buff *skb;
 		bool reply;
 
-		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
-		if (!skb)
+		spin_lock_bh(&vsock->send_pkt_list_lock);
+		if (list_empty(&vsock->send_pkt_list)) {
+			spin_unlock_bh(&vsock->send_pkt_list_lock);
 			break;
+		}
 
-		virtio_transport_deliver_tap_pkt(skb);
-		reply = virtio_vsock_skb_reply(skb);
+		pkt = list_first_entry(&vsock->send_pkt_list,
+				       struct virtio_vsock_pkt, list);
+		list_del_init(&pkt->list);
+		spin_unlock_bh(&vsock->send_pkt_list_lock);
 
-		sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
+		virtio_transport_deliver_tap_pkt(pkt);
+
+		reply = pkt->reply;
+
+		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
 		sgs[out_sg++] = &hdr;
 
-		if (skb->len > 0) {
-			sg_init_one(&buf, skb->data, skb->len);
+		if (pkt->buf) {
+			sg_init_one(&buf, pkt->buf, pkt->len);
 			sgs[out_sg++] = &buf;
 		}
 
-		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
+		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
 		/* Usually this means that there is no more space available in
 		 * the vq
 		 */
 		if (ret < 0) {
-			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+			spin_lock_bh(&vsock->send_pkt_list_lock);
+			list_add(&pkt->list, &vsock->send_pkt_list);
+			spin_unlock_bh(&vsock->send_pkt_list_lock);
 			break;
 		}
@@ -153,32 +164,32 @@ out:
 }
 
 static int
-virtio_transport_send_pkt(struct sk_buff *skb)
+virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
 {
-	struct virtio_vsock_hdr *hdr;
 	struct virtio_vsock *vsock;
-	int len = skb->len;
-
-	hdr = virtio_vsock_hdr(skb);
+	int len = pkt->len;
 
 	rcu_read_lock();
 	vsock = rcu_dereference(the_virtio_vsock);
 	if (!vsock) {
-		kfree_skb(skb);
+		virtio_transport_free_pkt(pkt);
 		len = -ENODEV;
 		goto out_rcu;
 	}
 
-	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
-		kfree_skb(skb);
+	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
+		virtio_transport_free_pkt(pkt);
 		len = -ENODEV;
 		goto out_rcu;
 	}
 
-	if (virtio_vsock_skb_reply(skb))
+	if (pkt->reply)
 		atomic_inc(&vsock->queued_replies);
- */
-static struct sk_buff *
-virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
+static struct virtio_vsock_pkt *
+virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
 			   size_t len,
 			   u32 src_cid,
 			   u32 src_port,
 			   u32 dst_cid,
 			   u32 dst_port)
 {
-	const size_t skb_len = VIRTIO_VSOCK_SKB_HEADROOM + len;
-	struct virtio_vsock_hdr *hdr;
-	struct sk_buff *skb;
-	void *payload;
+	struct virtio_vsock_pkt *pkt;
 	int err;
 
-	skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
-	if (!skb)
+	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+	if (!pkt)
 		return NULL;
 
-	hdr = virtio_vsock_hdr(skb);
-	hdr->type = cpu_to_le16(info->type);
-	hdr->op = cpu_to_le16(info->op);
-	hdr->src_cid = cpu_to_le64(src_cid);
-	hdr->dst_cid = cpu_to_le64(dst_cid);
-	hdr->src_port = cpu_to_le32(src_port);
-	hdr->dst_port = cpu_to_le32(dst_port);
-	hdr->flags = cpu_to_le32(info->flags);
-	hdr->len = cpu_to_le32(len);
+	pkt->hdr.type		= cpu_to_le16(info->type);
+	pkt->hdr.op		= cpu_to_le16(info->op);
+	pkt->hdr.src_cid	= cpu_to_le64(src_cid);
+	pkt->hdr.dst_cid	= cpu_to_le64(dst_cid);
+	pkt->hdr.src_port	= cpu_to_le32(src_port);
+	pkt->hdr.dst_port	= cpu_to_le32(dst_port);
+	pkt->hdr.flags		= cpu_to_le32(info->flags);
+	pkt->len		= len;
+	pkt->hdr.len		= cpu_to_le32(len);
+	pkt->reply		= info->reply;
+	pkt->vsk		= info->vsk;
 
 	if (info->msg && len > 0) {
-		payload = skb_put(skb, len);
-		err = memcpy_from_msg(payload, info->msg, len);
+		pkt->buf = kmalloc(len, GFP_KERNEL);
+		if (!pkt->buf)
+			goto out_pkt;
+
+		pkt->buf_len = len;
+
+		err = memcpy_from_msg(pkt->buf, info->msg, len);
 		if (err)
 			goto out;
 
 		if (msg_data_left(info->msg) == 0 &&
 		    info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
-			hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+			pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
 
 			if (info->msg->msg_flags & MSG_EOR)
-				hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+				pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
 		}
 	}
 
-	if (info->reply)
-		virtio_vsock_skb_set_reply(skb);
-
 	trace_virtio_transport_alloc_pkt(src_cid, src_port,
 					 dst_cid, dst_port,
 					 len,
@@ -98,18 +95,19 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
 					 info->op,
 					 info->flags);
 
-	return skb;
+	return pkt;
 
 out:
-	kfree_skb(skb);
+	kfree(pkt->buf);
+out_pkt:
+	kfree(pkt);
 	return NULL;
 }
 
 /* Packet capture */
 static struct sk_buff *virtio_transport_build_skb(void *opaque)
 {
-	struct virtio_vsock_hdr *pkt_hdr;
-	struct sk_buff *pkt = opaque;
+	struct virtio_vsock_pkt *pkt = opaque;
 	struct af_vsockmon_hdr *hdr;
 	struct sk_buff *skb;
 	size_t payload_len;
@@ -119,11 +117,10 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
 	 * the payload length from the header and the buffer pointer taking
 	 * care of the offset in the original packet.
 	 */
-	pkt_hdr = virtio_vsock_hdr(pkt);
-	payload_len = pkt->len;
-	payload_buf = pkt->data;
+	payload_len = le32_to_cpu(pkt->hdr.len);
+	payload_buf = pkt->buf + pkt->off;
 
-	skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
+	skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
 			GFP_ATOMIC);
 	if (!skb)
 		return NULL;
@@ -131,16 +128,16 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
 	hdr = skb_put(skb, sizeof(*hdr));
 
 	/* pkt->hdr is little-endian so no need to byteswap here */
-	hdr->src_cid = pkt_hdr->src_cid;
-	hdr->src_port = pkt_hdr->src_port;
-	hdr->dst_cid = pkt_hdr->dst_cid;
-	hdr->dst_port = pkt_hdr->dst_port;
+	hdr->src_cid = pkt->hdr.src_cid;
+	hdr->src_port = pkt->hdr.src_port;
+	hdr->dst_cid = pkt->hdr.dst_cid;
+	hdr->dst_port = pkt->hdr.dst_port;
 
 	hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
-	hdr->len = cpu_to_le16(sizeof(*pkt_hdr));
+	hdr->len = cpu_to_le16(sizeof(pkt->hdr));
 	memset(hdr->reserved, 0, sizeof(hdr->reserved));
 
-	switch (le16_to_cpu(pkt_hdr->op)) {
+	switch (le16_to_cpu(pkt->hdr.op)) {
 	case VIRTIO_VSOCK_OP_REQUEST:
 	case VIRTIO_VSOCK_OP_RESPONSE:
 		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
@@ -161,7 +158,7 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
 		break;
 	}
 
-	skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));
+	skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
 
 	if (payload_len) {
 		skb_put_data(skb, payload_buf, payload_len);
@@ -170,13 +167,13 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
 	return skb;
 }
 
-void virtio_transport_deliver_tap_pkt(struct sk_buff *skb)
+void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
 {
-	if (virtio_vsock_skb_tap_delivered(skb))
+	if (pkt->tap_delivered)
 		return;
 
-	vsock_deliver_tap(virtio_transport_build_skb, skb);
-	virtio_vsock_skb_set_tap_delivered(skb);
+	vsock_deliver_tap(virtio_transport_build_skb, pkt);
+	pkt->tap_delivered = true;
 }
 EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
@@ -199,8 +196,8 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
 	u32 src_cid, src_port, dst_cid, dst_port;
 	const struct virtio_transport *t_ops;
 	struct virtio_vsock_sock *vvs;
+	struct virtio_vsock_pkt *pkt;
 	u32 pkt_len = info->pkt_len;
-	struct sk_buff *skb;
 
 	info->type = virtio_transport_get_type(sk_vsock(vsk));
@@ -231,47 +228,42 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
 	if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
 		return pkt_len;
 
-	skb = virtio_transport_alloc_skb(info, pkt_len,
+	pkt = virtio_transport_alloc_pkt(info, pkt_len,
 					 src_cid, src_port,
 					 dst_cid, dst_port);
-	if (!skb) {
+	if (!pkt) {
 		virtio_transport_put_credit(vvs, pkt_len);
 		return -ENOMEM;
 	}
 
-	virtio_transport_inc_tx_pkt(vvs, skb);
+	virtio_transport_inc_tx_pkt(vvs, pkt);
 
-	return t_ops->send_pkt(skb);
+	return t_ops->send_pkt(pkt);
 }
 
 static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
-					struct sk_buff *skb)
+					struct virtio_vsock_pkt *pkt)
 {
-	if (vvs->rx_bytes + skb->len > vvs->buf_alloc)
+	if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
 		return false;
 
-	vvs->rx_bytes += skb->len;
+	vvs->rx_bytes += pkt->len;
 	return true;
 }
 
 static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
-					struct sk_buff *skb)
+					struct virtio_vsock_pkt *pkt)
 {
-	int len;
-
-	len = skb_headroom(skb) - sizeof(struct virtio_vsock_hdr) - skb->len;
-	vvs->rx_bytes -= len;
-	vvs->fwd_cnt += len;
+	vvs->rx_bytes -= pkt->len;
+	vvs->fwd_cnt += pkt->len;
 }
 
-void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
+void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
 {
-	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
-
 	spin_lock_bh(&vvs->rx_lock);
 	vvs->last_fwd_cnt = vvs->fwd_cnt;
-	hdr->fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
-	hdr->buf_alloc = cpu_to_le32(vvs->buf_alloc);
+	pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
+	pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
 	spin_unlock_bh(&vvs->rx_lock);
 }
 EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
@@ -315,29 +307,29 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
 				size_t len)
 {
 	struct virtio_vsock_sock *vvs = vsk->trans;
+	struct virtio_vsock_pkt *pkt;
 	size_t bytes, total = 0, off;
-	struct sk_buff *skb, *tmp;
 	int err = -EFAULT;
 
 	spin_lock_bh(&vvs->rx_lock);
 
-	skb_queue_walk_safe(&vvs->rx_queue, skb, tmp) {
-		off = 0;
+	list_for_each_entry(pkt, &vvs->rx_queue, list) {
+		off = pkt->off;
 
 		if (total == len)
 			break;
 
-		while (total < len && off < skb->len) {
+		while (total < len && off < pkt->len) {
 			bytes = len - total;
-			if (bytes > skb->len - off)
-				bytes = skb->len - off;
+			if (bytes > pkt->len - off)
+				bytes = pkt->len - off;
 
 			/* sk_lock is held by caller so no one else can dequeue.
 			 * Unlock rx_lock since memcpy_to_msg() may sleep.
 			 */
 			spin_unlock_bh(&vvs->rx_lock);
 
-			err = memcpy_to_msg(msg, skb->data + off, bytes);
+			err = memcpy_to_msg(msg, pkt->buf + off, bytes);
 			if (err)
 				goto out;
@@ -364,38 +356,37 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
 				   size_t len)
 {
 	struct virtio_vsock_sock *vvs = vsk->trans;
+	struct virtio_vsock_pkt *pkt;
 	size_t bytes, total = 0;
-	struct sk_buff *skb;
-	int err = -EFAULT;
 	u32 free_space;
+	int err = -EFAULT;
 
 	spin_lock_bh(&vvs->rx_lock);
 
-	while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
-		skb = __skb_dequeue(&vvs->rx_queue);
+	while (total < len && !list_empty(&vvs->rx_queue)) {
+		pkt = list_first_entry(&vvs->rx_queue,
+				       struct virtio_vsock_pkt, list);
 
 		bytes = len - total;
-		if (bytes > skb->len)
-			bytes = skb->len;
+		if (bytes > pkt->len - pkt->off)
+			bytes = pkt->len - pkt->off;
 
 		/* sk_lock is held by caller so no one else can dequeue.
 		 * Unlock rx_lock since memcpy_to_msg() may sleep.
 		 */
 		spin_unlock_bh(&vvs->rx_lock);
 
-		err = memcpy_to_msg(msg, skb->data, bytes);
+		err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
 		if (err)
 			goto out;
 
 		spin_lock_bh(&vvs->rx_lock);
 
 		total += bytes;
-		skb_pull(skb, bytes);
-
-		if (skb->len == 0) {
-			virtio_transport_dec_rx_pkt(vvs, skb);
-			consume_skb(skb);
-		} else {
-			__skb_queue_head(&vvs->rx_queue, skb);
+		pkt->off += bytes;
+		if (pkt->off == pkt->len) {
+			virtio_transport_dec_rx_pkt(vvs, pkt);
+			list_del(&pkt->list);
+			virtio_transport_free_pkt(pkt);
 		}
 	}
@@ -427,10 +418,10 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
 						 int flags)
 {
 	struct virtio_vsock_sock *vvs = vsk->trans;
+	struct virtio_vsock_pkt *pkt;
 	int dequeued_len = 0;
 	size_t user_buf_len = msg_data_left(msg);
 	bool msg_ready = false;
-	struct sk_buff *skb;
 
 	spin_lock_bh(&vvs->rx_lock);
@@ -440,18 +431,13 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
 	}
 
 	while (!msg_ready) {
-		struct virtio_vsock_hdr *hdr;
-
-		skb = __skb_dequeue(&vvs->rx_queue);
-		if (!skb)
-			break;
-		hdr = virtio_vsock_hdr(skb);
+		pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);
 
 		if (dequeued_len >= 0) {
 			size_t pkt_len;
 			size_t bytes_to_copy;
 
-			pkt_len = (size_t)le32_to_cpu(hdr->len);
+			pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
 			bytes_to_copy = min(user_buf_len, pkt_len);
 
 			if (bytes_to_copy) {
@@ -462,7 +448,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
 				 */
 				spin_unlock_bh(&vvs->rx_lock);
 
-				err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
+				err = memcpy_to_msg(msg, pkt->buf, bytes_to_copy);
 				if (err) {
 					/* Copy of message failed. Rest of
 					 * fragments will be freed without copy.
@@ -470,7 +456,6 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
 					 */
 					dequeued_len = err;
 				} else {
 					user_buf_len -= bytes_to_copy;
-					skb_pull(skb, bytes_to_copy);
 				}
 
 				spin_lock_bh(&vvs->rx_lock);
@@ -480,16 +465,17 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
 			dequeued_len += pkt_len;
 		}
 
-		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
+		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
 			msg_ready = true;
 			vvs->msg_count--;
 
-			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
+			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)
 				msg->msg_flags |= MSG_EOR;
 		}
 
-		virtio_transport_dec_rx_pkt(vvs, skb);
-		kfree_skb(skb);
+		virtio_transport_dec_rx_pkt(vvs, pkt);
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
 	}
 
 	spin_unlock_bh(&vvs->rx_lock);
@@ -627,7 +613,7 @@ int virtio_transport_do_socket_init(struct vsock_sock *vsk,
 	spin_lock_init(&vvs->rx_lock);
 	spin_lock_init(&vvs->tx_lock);
-	skb_queue_head_init(&vvs->rx_queue);
+	INIT_LIST_HEAD(&vvs->rx_queue);
 
 	return 0;
 }
@@ -824,16 +810,16 @@ void virtio_transport_destruct(struct vsock_sock *vsk)
 EXPORT_SYMBOL_GPL(virtio_transport_destruct);
 
 static int virtio_transport_reset(struct vsock_sock *vsk,
-				  struct sk_buff *skb)
+				  struct virtio_vsock_pkt *pkt)
 {
 	struct virtio_vsock_pkt_info info = {
 		.op = VIRTIO_VSOCK_OP_RST,
-		.reply = !!skb,
+		.reply = !!pkt,
 		.vsk = vsk,
 	};
 
 	/* Send RST only if the original pkt is not a RST pkt */
-	if (skb && le16_to_cpu(virtio_vsock_hdr(skb)->op) == VIRTIO_VSOCK_OP_RST)
+	if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
 		return 0;
 
 	return virtio_transport_send_pkt_info(vsk, &info);
@@ -843,30 +829,29 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
  * attempt was made to connect to a socket that does not exist.
  */
 static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
-					  struct sk_buff *skb)
+					  struct virtio_vsock_pkt *pkt)
 {
-	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+	struct virtio_vsock_pkt *reply;
 	struct virtio_vsock_pkt_info info = {
 		.op = VIRTIO_VSOCK_OP_RST,
-		.type = le16_to_cpu(hdr->type),
+		.type = le16_to_cpu(pkt->hdr.type),
 		.reply = true,
 	};
-	struct sk_buff *reply;
 
 	/* Send RST only if the original pkt is not a RST pkt */
-	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
+	if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
 		return 0;
 
-	reply = virtio_transport_alloc_skb(&info, 0,
-					   le64_to_cpu(hdr->dst_cid),
-					   le32_to_cpu(hdr->dst_port),
-					   le64_to_cpu(hdr->src_cid),
-					   le32_to_cpu(hdr->src_port));
+	reply = virtio_transport_alloc_pkt(&info, 0,
+					   le64_to_cpu(pkt->hdr.dst_cid),
+					   le32_to_cpu(pkt->hdr.dst_port),
+					   le64_to_cpu(pkt->hdr.src_cid),
+					   le32_to_cpu(pkt->hdr.src_port));
 	if (!reply)
 		return -ENOMEM;
 
 	if (!t) {
-		kfree_skb(reply);
+		virtio_transport_free_pkt(reply);
 		return -ENOTCONN;
 	}
@@ -877,11 +862,16 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
 static void virtio_transport_remove_sock(struct vsock_sock *vsk)
 {
 	struct virtio_vsock_sock *vvs = vsk->trans;
+	struct virtio_vsock_pkt *pkt, *tmp;
 
 	/* We don't need to take rx_lock, as the socket is closing and we are
 	 * removing it.
 	 */
-	__skb_queue_purge(&vvs->rx_queue);
+	list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+
 	vsock_remove_sock(vsk);
 }
@@ -995,14 +985,13 @@ EXPORT_SYMBOL_GPL(virtio_transport_release);
 
 static int
 virtio_transport_recv_connecting(struct sock *sk,
-				 struct sk_buff *skb)
+				 struct virtio_vsock_pkt *pkt)
 {
-	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
 	struct vsock_sock *vsk = vsock_sk(sk);
-	int skerr;
 	int err;
+	int skerr;
 
-	switch (le16_to_cpu(hdr->op)) {
+	switch (le16_to_cpu(pkt->hdr.op)) {
 	case VIRTIO_VSOCK_OP_RESPONSE:
 		sk->sk_state = TCP_ESTABLISHED;
 		sk->sk_socket->state = SS_CONNECTED;
@@ -1023,7 +1012,7 @@ virtio_transport_recv_connecting(struct sock *sk,
 	return 0;
 
 destroy:
-	virtio_transport_reset(vsk, skb);
+	virtio_transport_reset(vsk, pkt);
 	sk->sk_state = TCP_CLOSE;
 	sk->sk_err = skerr;
 	sk_error_report(sk);
@@ -1032,37 +1021,34 @@ destroy:
 
 static void
 virtio_transport_recv_enqueue(struct vsock_sock *vsk,
-			      struct sk_buff *skb)
+			      struct virtio_vsock_pkt *pkt)
 {
 	struct virtio_vsock_sock *vvs = vsk->trans;
 	bool can_enqueue, free_pkt = false;
-	struct virtio_vsock_hdr *hdr;
-	u32 len;
 
-	hdr = virtio_vsock_hdr(skb);
-	len = le32_to_cpu(hdr->len);
+	pkt->len = le32_to_cpu(pkt->hdr.len);
+	pkt->off = 0;
 
 	spin_lock_bh(&vvs->rx_lock);
 
-	can_enqueue = virtio_transport_inc_rx_pkt(vvs, skb);
+	can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
 	if (!can_enqueue) {
 		free_pkt = true;
 		goto out;
 	}
 
-	if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
+	if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)
 		vvs->msg_count++;
 
 	/* Try to copy small packets into the buffer of last packet queued,
 	 * to avoid wasting memory queueing the entire buffer with a small
 	 * payload.
 	 */
-	if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
-		struct virtio_vsock_hdr *last_hdr;
-		struct sk_buff *last_skb;
+	if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) {
+		struct virtio_vsock_pkt *last_pkt;
 
-		last_skb = skb_peek_tail(&vvs->rx_queue);
-		last_hdr = virtio_vsock_hdr(last_skb);
+		last_pkt = list_last_entry(&vvs->rx_queue,
+					   struct virtio_vsock_pkt, list);
 
 		/* If there is space in the last packet queued, we copy the
 		 * new packet in its buffer. We avoid this if the last packet
@@ -1070,35 +1056,35 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
 		 * delimiter of SEQPACKET message, so 'pkt' is the first packet
 		 * of a new message.
 		 */
-		if (skb->len < skb_tailroom(last_skb) &&
-		    !(le32_to_cpu(last_hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)) {
-			memcpy(skb_put(last_skb, skb->len), skb->data, skb->len);
+		if ((pkt->len <= last_pkt->buf_len - last_pkt->len) &&
+		    !(le32_to_cpu(last_pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)) {
+			memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
+			       pkt->len);
+			last_pkt->len += pkt->len;
 			free_pkt = true;
-			last_hdr->flags |= hdr->flags;
-			last_hdr->len = cpu_to_le32(last_skb->len);
+			last_pkt->hdr.flags |= pkt->hdr.flags;
 			goto out;
 		}
 	}
 
-	__skb_queue_tail(&vvs->rx_queue, skb);
+	list_add_tail(&pkt->list, &vvs->rx_queue);
 
 out:
 	spin_unlock_bh(&vvs->rx_lock);
 	if (free_pkt)
-		kfree_skb(skb);
+		virtio_transport_free_pkt(pkt);
 }
 
 static int
 virtio_transport_recv_connected(struct sock *sk,
-				struct sk_buff *skb)
+				struct virtio_vsock_pkt *pkt)
 {
-	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
 	struct vsock_sock *vsk = vsock_sk(sk);
 	int err = 0;
 
-	switch (le16_to_cpu(hdr->op)) {
+	switch (le16_to_cpu(pkt->hdr.op)) {
 	case VIRTIO_VSOCK_OP_RW:
-		virtio_transport_recv_enqueue(vsk, skb);
+		virtio_transport_recv_enqueue(vsk, pkt);
 		vsock_data_ready(sk);
 		return err;
 	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
@@ -1108,17 +1094,18 @@ virtio_transport_recv_connected(struct sock *sk,
 		sk->sk_write_space(sk);
 		break;
 	case VIRTIO_VSOCK_OP_SHUTDOWN:
-		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
+		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
 			vsk->peer_shutdown |= RCV_SHUTDOWN;
-		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
+		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
 			vsk->peer_shutdown |= SEND_SHUTDOWN;
 		if (vsk->peer_shutdown == SHUTDOWN_MASK &&
 		    vsock_stream_has_data(vsk) <= 0 &&
 		    !sock_flag(sk, SOCK_DONE)) {
 			(void)virtio_transport_reset(vsk, NULL);
+			virtio_transport_do_close(vsk, true);
 		}
-		if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
+		if (le32_to_cpu(pkt->hdr.flags))
 			sk->sk_state_change(sk);
 		break;
 	case VIRTIO_VSOCK_OP_RST:
@@ -1129,30 +1116,28 @@ virtio_transport_recv_connected(struct sock *sk,
 		break;
 	}
 
-	kfree_skb(skb);
+	virtio_transport_free_pkt(pkt);
 	return err;
 }
 
 static void
 virtio_transport_recv_disconnecting(struct sock *sk,
-				    struct sk_buff *skb)
+				    struct virtio_vsock_pkt *pkt)
 {
-	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
 	struct vsock_sock *vsk = vsock_sk(sk);
 
-	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
+	if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
 		virtio_transport_do_close(vsk, true);
 }
 
 static int
 virtio_transport_send_response(struct vsock_sock *vsk,
-			       struct sk_buff *skb)
+			       struct virtio_vsock_pkt *pkt)
 {
-	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
 	struct virtio_vsock_pkt_info info = {
 		.op = VIRTIO_VSOCK_OP_RESPONSE,
-		.remote_cid = le64_to_cpu(hdr->src_cid),
-		.remote_port = le32_to_cpu(hdr->src_port),
+		.remote_cid = le64_to_cpu(pkt->hdr.src_cid),
+		.remote_port = le32_to_cpu(pkt->hdr.src_port),
 		.reply = true,
 		.vsk = vsk,
 	};
@@ -1161,9 +1146,8 @@ virtio_transport_send_response(struct vsock_sock *vsk,
 }
 
 static bool virtio_transport_space_update(struct sock *sk,
-					  struct sk_buff *skb)
+					  struct virtio_vsock_pkt *pkt)
 {
-	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
 	struct vsock_sock *vsk = vsock_sk(sk);
 	struct virtio_vsock_sock *vvs = vsk->trans;
 	bool space_available;
@@ -1178,8 +1162,8 @@ static bool virtio_transport_space_update(struct sock *sk,
 
 	/* buf_alloc and fwd_cnt is always included in the hdr */
 	spin_lock_bh(&vvs->tx_lock);
-	vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
-	vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
+	vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
+	vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
 	space_available = virtio_transport_has_space(vsk);
 	spin_unlock_bh(&vvs->tx_lock);
 	return space_available;
}
@@ -1187,28 +1171,27 @@
 
 /* Handle server socket */
 static int
-virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
+virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
 			     struct virtio_transport *t)
 {
-	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
 	struct vsock_sock *vsk = vsock_sk(sk);
 	struct vsock_sock *vchild;
 	struct sock *child;
 	int ret;
 
-	if (le16_to_cpu(hdr->op) != VIRTIO_VSOCK_OP_REQUEST) {
-		virtio_transport_reset_no_sock(t, skb);
+	if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
+		virtio_transport_reset_no_sock(t, pkt);
 		return -EINVAL;
 	}
 
 	if (sk_acceptq_is_full(sk)) {
-		virtio_transport_reset_no_sock(t, skb);
+		virtio_transport_reset_no_sock(t, pkt);
 		return -ENOMEM;
 	}
 
 	child = vsock_create_connected(sk);
 	if (!child) {
-		virtio_transport_reset_no_sock(t, skb);
+		virtio_transport_reset_no_sock(t, pkt);
 		return -ENOMEM;
 	}
@@ -1219,10 +1202,10 @@ virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
 	child->sk_state = TCP_ESTABLISHED;
 
 	vchild = vsock_sk(child);
-	vsock_addr_init(&vchild->local_addr, le64_to_cpu(hdr->dst_cid),
-			le32_to_cpu(hdr->dst_port));
-	vsock_addr_init(&vchild->remote_addr, le64_to_cpu(hdr->src_cid),
-			le32_to_cpu(hdr->src_port));
+	vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
+			le32_to_cpu(pkt->hdr.dst_port));
+	vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
+			le32_to_cpu(pkt->hdr.src_port));
 
 	ret = vsock_assign_transport(vchild, vsk);
 	/* Transport assigned (looking at remote_addr) must be the same
@@ -1230,17 +1213,17 @@ virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
 	 */
 	if (ret || vchild->transport != &t->transport) {
 		release_sock(child);
-		virtio_transport_reset_no_sock(t, skb);
+		virtio_transport_reset_no_sock(t, pkt);
 		sock_put(child);
 		return ret;
 	}
 
-	if (virtio_transport_space_update(child, skb))
+	if (virtio_transport_space_update(child, pkt))
 		child->sk_write_space(child);
 
 	vsock_insert_connected(vchild);
 	vsock_enqueue_accept(sk, child);
-	virtio_transport_send_response(vchild, skb);
+	virtio_transport_send_response(vchild, pkt);
 
 	release_sock(child);
@@ -1258,30 +1241,29 @@ static bool virtio_transport_valid_type(u16 type)
  * lock.
  */
 void virtio_transport_recv_pkt(struct virtio_transport *t,
-			       struct sk_buff *skb)
+			       struct virtio_vsock_pkt *pkt)
 {
-	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
 	struct sockaddr_vm src, dst;
 	struct vsock_sock *vsk;
 	struct sock *sk;
 	bool space_available;
 
-	vsock_addr_init(&src, le64_to_cpu(hdr->src_cid),
-			le32_to_cpu(hdr->src_port));
-	vsock_addr_init(&dst, le64_to_cpu(hdr->dst_cid),
-			le32_to_cpu(hdr->dst_port));
+	vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
+			le32_to_cpu(pkt->hdr.src_port));
+	vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
+			le32_to_cpu(pkt->hdr.dst_port));
 
 	trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
 					dst.svm_cid, dst.svm_port,
-					le32_to_cpu(hdr->len),
-					le16_to_cpu(hdr->type),
-					le16_to_cpu(hdr->op),
-					le32_to_cpu(hdr->flags),
-					le32_to_cpu(hdr->buf_alloc),
-					le32_to_cpu(hdr->fwd_cnt));
+					le32_to_cpu(pkt->hdr.len),
+					le16_to_cpu(pkt->hdr.type),
+					le16_to_cpu(pkt->hdr.op),
+					le32_to_cpu(pkt->hdr.flags),
+					le32_to_cpu(pkt->hdr.buf_alloc),
+					le32_to_cpu(pkt->hdr.fwd_cnt));
 
-	if (!virtio_transport_valid_type(le16_to_cpu(hdr->type))) {
-		(void)virtio_transport_reset_no_sock(t, skb);
+	if (!virtio_transport_valid_type(le16_to_cpu(pkt->hdr.type))) {
+		(void)virtio_transport_reset_no_sock(t, pkt);
 		goto free_pkt;
 	}
@@ -1292,13 +1274,13 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
 	if (!sk) {
 		sk = vsock_find_bound_socket(&dst);
 		if (!sk) {
-			(void)virtio_transport_reset_no_sock(t, skb);
+			(void)virtio_transport_reset_no_sock(t, pkt);
 			goto free_pkt;
 		}
 	}
 
-	if (virtio_transport_get_type(sk) != le16_to_cpu(hdr->type)) {
-		(void)virtio_transport_reset_no_sock(t, skb);
+	if (virtio_transport_get_type(sk) != le16_to_cpu(pkt->hdr.type)) {
+		(void)virtio_transport_reset_no_sock(t, pkt);
 		sock_put(sk);
 		goto free_pkt;
 	}
@@ -1309,13 +1291,13 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
 
 	/* Check if sk has been closed before lock_sock */
 	if (sock_flag(sk, SOCK_DONE)) {
-		(void)virtio_transport_reset_no_sock(t, skb);
+		(void)virtio_transport_reset_no_sock(t, pkt);
 		release_sock(sk);
 		sock_put(sk);
 		goto free_pkt;
 	}
 
-	space_available = virtio_transport_space_update(sk, skb);
+	space_available = virtio_transport_space_update(sk, pkt);
 
 	/* Update CID in case it has changed after a transport reset event */
 	if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
@@ -1326,23 +1308,23 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
 
 	switch (sk->sk_state) {
 	case TCP_LISTEN:
-		virtio_transport_recv_listen(sk, skb, t);
-		kfree_skb(skb);
+		virtio_transport_recv_listen(sk, pkt, t);
+		virtio_transport_free_pkt(pkt);
 		break;
 	case TCP_SYN_SENT:
-		virtio_transport_recv_connecting(sk, skb);
-		kfree_skb(skb);
+		virtio_transport_recv_connecting(sk, pkt);
+		virtio_transport_free_pkt(pkt);
 		break;
 	case TCP_ESTABLISHED:
-		virtio_transport_recv_connected(sk, skb);
+		virtio_transport_recv_connected(sk, pkt);
 		break;
 	case TCP_CLOSING:
-		virtio_transport_recv_disconnecting(sk, skb);
-		kfree_skb(skb);
+		virtio_transport_recv_disconnecting(sk, pkt);
+		virtio_transport_free_pkt(pkt);
 		break;
 	default:
-		(void)virtio_transport_reset_no_sock(t, skb);
-		kfree_skb(skb);
+		(void)virtio_transport_reset_no_sock(t, pkt);
+		virtio_transport_free_pkt(pkt);
 		break;
 	}
@@ -1355,42 +1337,16 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
 	return;
 
 free_pkt:
-	kfree_skb(skb);
+	virtio_transport_free_pkt(pkt);
 }
 EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
 
-/* Remove skbs found in a queue that have a vsk that matches.
- *
- * Each skb is freed.
- *
- * Returns the count of skbs that were reply packets.
- */
-int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue)
+void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
 {
-	struct sk_buff_head freeme;
-	struct sk_buff *skb, *tmp;
-	int cnt = 0;
-
-	skb_queue_head_init(&freeme);
-
-	spin_lock_bh(&queue->lock);
-	skb_queue_walk_safe(queue, skb, tmp) {
-		if (vsock_sk(skb->sk) != vsk)
-			continue;
-
-		__skb_unlink(skb, queue);
-		__skb_queue_tail(&freeme, skb);
-
-		if (virtio_vsock_skb_reply(skb))
-			cnt++;
-	}
-	spin_unlock_bh(&queue->lock);
-
-	__skb_queue_purge(&freeme);
-
-	return cnt;
+	kvfree(pkt->buf);
+	kfree(pkt);
 }
-EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs);
+EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Asias He");
diff --git a/net/vmw_vsock/vsock_loopback.c b/net/vmw_vsock/vsock_loopback.c
index 671e03240fc5..169a8cf65b39 100644
--- a/net/vmw_vsock/vsock_loopback.c
+++ b/net/vmw_vsock/vsock_loopback.c
@@ -16,7 +16,7 @@ struct vsock_loopback {
 	struct workqueue_struct *workqueue;
 
 	spinlock_t pkt_list_lock; /* protects pkt_list */
-	struct sk_buff_head pkt_queue;
+	struct list_head pkt_list;
 	struct work_struct pkt_work;
 };
@@ -27,13 +27,13 @@ static u32 vsock_loopback_get_local_cid(void)
 	return VMADDR_CID_LOCAL;
 }
 
-static int vsock_loopback_send_pkt(struct sk_buff *skb)
+static int vsock_loopback_send_pkt(struct virtio_vsock_pkt *pkt)
 {
 	struct vsock_loopback *vsock = &the_vsock_loopback;
-	int len = skb->len;
+	int len = pkt->len;
 
 	spin_lock_bh(&vsock->pkt_list_lock);
-	skb_queue_tail(&vsock->pkt_queue, skb);
+	list_add_tail(&pkt->list, &vsock->pkt_list);
 	spin_unlock_bh(&vsock->pkt_list_lock);
 
 	queue_work(vsock->workqueue, &vsock->pkt_work);
@@ -44,8 +44,21 @@
 static int vsock_loopback_cancel_pkt(struct vsock_sock *vsk)
 {
 	struct vsock_loopback *vsock = &the_vsock_loopback;
+	struct virtio_vsock_pkt *pkt, *n;
+	LIST_HEAD(freeme);
 
-	virtio_transport_purge_skbs(vsk, &vsock->pkt_queue);
+	spin_lock_bh(&vsock->pkt_list_lock);
+	list_for_each_entry_safe(pkt, n, &vsock->pkt_list, list) {
+		if (pkt->vsk != vsk)
+			continue;
+		list_move(&pkt->list, &freeme);
+	}
+	spin_unlock_bh(&vsock->pkt_list_lock);
+
+	list_for_each_entry_safe(pkt, n, &freeme, list) {
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
 
 	return 0;
 }
@@ -108,18 +121,20 @@ static void vsock_loopback_work(struct work_struct *work)
 {
 	struct vsock_loopback *vsock =
 		container_of(work, struct vsock_loopback, pkt_work);
-	struct sk_buff_head pkts;
-	struct sk_buff *skb;
-
-	skb_queue_head_init(&pkts);
+	LIST_HEAD(pkts);
 
 	spin_lock_bh(&vsock->pkt_list_lock);
-	skb_queue_splice_init(&vsock->pkt_queue, &pkts);
+	list_splice_init(&vsock->pkt_list, &pkts);
 	spin_unlock_bh(&vsock->pkt_list_lock);
 
-	while ((skb = __skb_dequeue(&pkts))) {
-		virtio_transport_deliver_tap_pkt(skb);
-		virtio_transport_recv_pkt(&loopback_transport, skb);
+	while (!list_empty(&pkts)) {
+		struct virtio_vsock_pkt *pkt;
+
+		pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
+		list_del_init(&pkt->list);
+
+		virtio_transport_deliver_tap_pkt(pkt);
+		virtio_transport_recv_pkt(&loopback_transport, pkt);
 	}
 }
@@ -133,7 +148,7 @@ static int __init vsock_loopback_init(void)
 		return -ENOMEM;
 
 	spin_lock_init(&vsock->pkt_list_lock);
-	skb_queue_head_init(&vsock->pkt_queue);
+	INIT_LIST_HEAD(&vsock->pkt_list);
 	INIT_WORK(&vsock->pkt_work, vsock_loopback_work);
 
 	ret = vsock_core_register(&loopback_transport.transport,
@@ -151,13 +166,19 @@ out_wq:
 static void __exit vsock_loopback_exit(void)
 {
 	struct vsock_loopback *vsock = &the_vsock_loopback;
+	struct virtio_vsock_pkt *pkt;
 
 	vsock_core_unregister(&loopback_transport.transport);
 	flush_work(&vsock->pkt_work);
 
 	spin_lock_bh(&vsock->pkt_list_lock);
-	virtio_vsock_skb_queue_purge(&vsock->pkt_queue);
+	while (!list_empty(&vsock->pkt_list)) {
+		pkt = list_first_entry(&vsock->pkt_list,
+				       struct virtio_vsock_pkt, list);
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
 	spin_unlock_bh(&vsock->pkt_list_lock);
 
 	destroy_workqueue(vsock->workqueue);
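
Reviewer's note, not part of the patch itself: the queueing discipline reinstated above is the kernel's stock pattern of a list_head FIFO guarded by a dedicated spinlock, taken with the _bh variants because producers and consumers can meet in softirq context. Below is a minimal, self-contained sketch of that pattern; the demo_* names are hypothetical stand-ins for send_pkt_list/send_pkt_list_lock, deliberately simpler than the driver code.

/* Sketch only: a spinlock-protected list_head FIFO, assuming demo_* names. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_pkt {
	struct list_head list;	/* linkage inside the FIFO */
	void *owner;		/* cf. pkt->vsk: used only for cancellation */
	u32 len;
};

static LIST_HEAD(demo_queue);		/* cf. vsock->send_pkt_list */
static DEFINE_SPINLOCK(demo_lock);	/* cf. vsock->send_pkt_list_lock */

/* Producer side, cf. vhost_transport_send_pkt(). */
static void demo_enqueue(struct demo_pkt *pkt)
{
	spin_lock_bh(&demo_lock);
	list_add_tail(&pkt->list, &demo_queue);
	spin_unlock_bh(&demo_lock);
}

/* Consumer side, cf. vhost_transport_do_send_pkt(); NULL when empty. */
static struct demo_pkt *demo_dequeue(void)
{
	struct demo_pkt *pkt = NULL;

	spin_lock_bh(&demo_lock);
	if (!list_empty(&demo_queue)) {
		pkt = list_first_entry(&demo_queue, struct demo_pkt, list);
		list_del_init(&pkt->list);
	}
	spin_unlock_bh(&demo_lock);

	return pkt;
}

The same dequeue shape also explains the requeue-on-failure paths above: because the packet is unlinked before the virtqueue descriptor is fetched, a failed vhost_get_vq_desc() must retake the lock and list_add() the packet back at the head.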
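
The two cancel_pkt() implementations share another pattern worth noting: matching entries are first moved onto a private freeme list under the lock, then freed only after the lock is dropped, so virtio_transport_free_pkt() never runs with the spinlock held. A hedged sketch, reusing the hypothetical demo_* names from above:

/* Sketch only: purge entries owned by 'owner' without freeing under the lock. */
static int demo_purge(void *owner)
{
	struct demo_pkt *pkt, *n;
	LIST_HEAD(freeme);
	int cnt = 0;

	spin_lock_bh(&demo_lock);
	list_for_each_entry_safe(pkt, n, &demo_queue, list) {
		if (pkt->owner != owner)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&demo_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		list_del(&pkt->list);
		kfree(pkt);
		cnt++;
	}

	return cnt;
}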
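
One asymmetry in the restored allocation paths deserves attention when reviewing the free path: vhost_vsock_alloc_pkt() allocates the payload with kvmalloc(), while virtio_vsock_rx_fill() and virtio_transport_alloc_pkt() use kmalloc(); virtio_transport_free_pkt() frees with kvfree(), which handles memory from either allocator, so the single free path stays correct for all three callers.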