From 01689b1ed4863f937e5b9db65a79fb72d764629a Mon Sep 17 00:00:00 2001
From: Marvin Liu
Date: Thu, 19 Sep 2019 01:06:54 +0800
Subject: [PATCH] net/virtio: update stats when in-order Tx done

When doing in-order xmit enqueue, packets are buffered and then flushed
into the avail ring. Buffered packets can be dropped due to insufficient
space. Moving the stats update to just after the successful avail ring
update guarantees correctness.

Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
Cc: stable@dpdk.org

Signed-off-by: Marvin Liu
Reviewed-by: Maxime Coquelin
---
 drivers/net/virtio/virtio_rxtx.c | 88 ++++++++++++++++----------------
 1 file changed, 44 insertions(+), 44 deletions(-)

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 27ead19fbe..91df5b1d0c 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -106,6 +106,48 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
 	dxp->next = VQ_RING_DESC_CHAIN_END;
 }
 
+static inline void
+virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+	uint32_t s = mbuf->pkt_len;
+	struct rte_ether_addr *ea;
+
+	stats->bytes += s;
+
+	if (s == 64) {
+		stats->size_bins[1]++;
+	} else if (s > 64 && s < 1024) {
+		uint32_t bin;
+
+		/* count zeros, and offset into correct bin */
+		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+		stats->size_bins[bin]++;
+	} else {
+		if (s < 64)
+			stats->size_bins[0]++;
+		else if (s < 1519)
+			stats->size_bins[6]++;
+		else
+			stats->size_bins[7]++;
+	}
+
+	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+	if (rte_is_multicast_ether_addr(ea)) {
+		if (rte_is_broadcast_ether_addr(ea))
+			stats->broadcast++;
+		else
+			stats->multicast++;
+	}
+}
+
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+	VIRTIO_DUMP_PACKET(m, m->data_len);
+
+	virtio_update_packet_stats(&rxvq->stats, m);
+}
+
 static uint16_t
 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
 				  struct rte_mbuf **rx_pkts,
@@ -317,7 +359,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
 }
 
 /* Cleanup from completed inorder transmits. */
-static void
+static __rte_always_inline void
 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
 {
 	uint16_t i, idx = vq->vq_used_cons_idx;
@@ -596,6 +638,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
 		dxp->cookie = (void *)cookies[i];
 		dxp->ndescs = 1;
+		virtio_update_packet_stats(&txvq->stats, cookies[i]);
 
 		hdr = (struct virtio_net_hdr *)
 			rte_pktmbuf_prepend(cookies[i], head_size);
@@ -1083,48 +1126,6 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
 	}
 }
 
-static inline void
-virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
-{
-	uint32_t s = mbuf->pkt_len;
-	struct rte_ether_addr *ea;
-
-	stats->bytes += s;
-
-	if (s == 64) {
-		stats->size_bins[1]++;
-	} else if (s > 64 && s < 1024) {
-		uint32_t bin;
-
-		/* count zeros, and offset into correct bin */
-		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
-		stats->size_bins[bin]++;
-	} else {
-		if (s < 64)
-			stats->size_bins[0]++;
-		else if (s < 1519)
-			stats->size_bins[6]++;
-		else
-			stats->size_bins[7]++;
-	}
-
-	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
-	if (rte_is_multicast_ether_addr(ea)) {
-		if (rte_is_broadcast_ether_addr(ea))
-			stats->broadcast++;
-		else
-			stats->multicast++;
-	}
-}
-
-static inline void
-virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
-{
-	VIRTIO_DUMP_PACKET(m, m->data_len);
-
-	virtio_update_packet_stats(&rxvq->stats, m);
-}
-
 /* Optionally fill offload information in structure */
 static inline int
 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
@@ -2198,7 +2199,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 			inorder_pkts[nb_inorder_pkts] = txm;
 			nb_inorder_pkts++;
 
-			virtio_update_packet_stats(&txvq->stats, txm);
 			continue;
 		}
 
-- 
2.20.1
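
The fix follows a simple rule: a packet's stats are updated only once its
descriptor has actually been written to the avail ring, never while it is
merely buffered for a later flush. Below is a minimal sketch of that pattern,
using simplified, hypothetical names (struct pkt, struct stats,
enqueue_inorder(), xmit_burst()); it is not the actual driver code.

/*
 * Sketch only: a packet is counted inside enqueue_inorder(), after its
 * ring slot is consumed, so packets dropped for lack of space are never
 * reflected in the stats.
 */
#include <stdint.h>

struct pkt { uint32_t pkt_len; };
struct stats { uint64_t packets; uint64_t bytes; };

/* Place one buffered packet on the ring; return 0 on success, -1 if full. */
static int
enqueue_inorder(struct pkt *p, struct stats *st, unsigned int *free_slots)
{
	if (*free_slots == 0)
		return -1;	/* dropped: must not be counted */
	(*free_slots)--;

	/* Update stats only after the slot is really taken. */
	st->packets++;
	st->bytes += p->pkt_len;
	return 0;
}

/* Burst path: stats are owned by enqueue_inorder(), not updated up front. */
static uint16_t
xmit_burst(struct pkt **pkts, uint16_t nb, struct stats *st,
	   unsigned int *free_slots)
{
	uint16_t i;

	for (i = 0; i < nb; i++)
		if (enqueue_inorder(pkts[i], st, free_slots) < 0)
			break;	/* ring full: stop; stats already correct */
	return i;	/* number of packets actually placed on the ring */
}

Before this patch, the burst path counted a packet as soon as it was added to
the in-order buffer, which overstated the counters whenever the later flush
ran out of ring space; moving the update into the enqueue function removes
that window.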