From abefc0abc8e770a983c48887ddfcb1a21bb345aa Mon Sep 17 00:00:00 2001
From: Tiwei Bie
Date: Tue, 19 Mar 2019 14:43:12 +0800
Subject: [PATCH] net/virtio: improve batching in standard Rx path

This patch improves descriptor refill by using the same batching
strategy as done in the in-order and mergeable paths.

Signed-off-by: Tiwei Bie
Reviewed-by: Jens Freimann
Reviewed-by: Maxime Coquelin
---
 drivers/net/virtio/virtio_rxtx.c | 60 ++++++++++++++++++--------------
 1 file changed, 34 insertions(+), 26 deletions(-)

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 42d0f533c0..5f6796bdba 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1211,7 +1211,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
 	struct virtio_hw *hw = vq->hw;
-	struct rte_mbuf *rxm, *new_mbuf;
+	struct rte_mbuf *rxm;
 	uint16_t nb_used, num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
@@ -1281,20 +1281,24 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	rxvq->stats.packets += nb_rx;
 
 	/* Allocate new mbuf for the used descriptor */
-	while (likely(!virtqueue_full(vq))) {
-		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (unlikely(new_mbuf == NULL)) {
-			struct rte_eth_dev *dev
-				= &rte_eth_devices[rxvq->port_id];
-			dev->data->rx_mbuf_alloc_failed++;
-			break;
-		}
-		error = virtqueue_enqueue_recv_refill(vq, &new_mbuf, 1);
-		if (unlikely(error)) {
-			rte_pktmbuf_free(new_mbuf);
-			break;
+	if (likely(!virtqueue_full(vq))) {
+		uint16_t free_cnt = vq->vq_free_cnt;
+		struct rte_mbuf *new_pkts[free_cnt];
+
+		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
+						free_cnt) == 0)) {
+			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
+					free_cnt);
+			if (unlikely(error)) {
+				for (i = 0; i < free_cnt; i++)
+					rte_pktmbuf_free(new_pkts[i]);
+			}
+			nb_enqueued += free_cnt;
+		} else {
+			struct rte_eth_dev *dev =
+				&rte_eth_devices[rxvq->port_id];
+			dev->data->rx_mbuf_alloc_failed += free_cnt;
 		}
-		nb_enqueued++;
 	}
 
 	if (likely(nb_enqueued)) {
@@ -1316,7 +1320,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
 	struct virtio_hw *hw = vq->hw;
-	struct rte_mbuf *rxm, *new_mbuf;
+	struct rte_mbuf *rxm;
 	uint16_t num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
@@ -1380,20 +1384,24 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 	rxvq->stats.packets += nb_rx;
 
 	/* Allocate new mbuf for the used descriptor */
-	while (likely(!virtqueue_full(vq))) {
-		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (unlikely(new_mbuf == NULL)) {
+	if (likely(!virtqueue_full(vq))) {
+		uint16_t free_cnt = vq->vq_free_cnt;
+		struct rte_mbuf *new_pkts[free_cnt];
+
+		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
+						free_cnt) == 0)) {
+			error = virtqueue_enqueue_recv_refill_packed(vq,
+					new_pkts, free_cnt);
+			if (unlikely(error)) {
+				for (i = 0; i < free_cnt; i++)
+					rte_pktmbuf_free(new_pkts[i]);
+			}
+			nb_enqueued += free_cnt;
+		} else {
 			struct rte_eth_dev *dev =
 				&rte_eth_devices[rxvq->port_id];
-			dev->data->rx_mbuf_alloc_failed++;
-			break;
-		}
-		error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1);
-		if (unlikely(error)) {
-			rte_pktmbuf_free(new_mbuf);
-			break;
+			dev->data->rx_mbuf_alloc_failed += free_cnt;
 		}
-		nb_enqueued++;
 	}
 
 	if (likely(nb_enqueued)) {
-- 
2.20.1
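
Note for readers: the core of the change is replacing a one-mbuf-at-a-time
refill loop (rte_mbuf_raw_alloc() plus a single-descriptor enqueue per
iteration) with one bulk allocation sized to the queue's free descriptor
count (vq->vq_free_cnt), followed by one batched enqueue.
rte_pktmbuf_alloc_bulk() is all-or-nothing, which is why the failure path
charges the whole burst to rx_mbuf_alloc_failed. Below is a minimal,
self-contained C sketch of that strategy; the pool and ring types are
simplified stand-ins invented for illustration, not the DPDK structures
the patch operates on.

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins (hypothetical, for illustration only). */
struct buf { char data[2048]; };

struct pool {
	struct buf **free_bufs;	/* LIFO stack of free buffers */
	unsigned int nb_free;
};

/* All-or-nothing bulk allocation, in the spirit of
 * rte_pktmbuf_alloc_bulk(): returns 0 and fills bufs[0..n-1] on
 * success, -1 (allocating nothing) if the pool cannot cover n. */
static int
pool_alloc_bulk(struct pool *p, struct buf **bufs, unsigned int n)
{
	if (p->nb_free < n)
		return -1;
	p->nb_free -= n;
	memcpy(bufs, p->free_bufs + p->nb_free, n * sizeof(*bufs));
	return 0;
}

static void
pool_free(struct pool *p, struct buf *b)
{
	p->free_bufs[p->nb_free++] = b;
}

struct ring {
	struct buf **slots;
	unsigned int size;
	unsigned int head;
	unsigned int free_cnt;	/* plays the role of vq->vq_free_cnt */
};

/* Batched refill in the spirit of virtqueue_enqueue_recv_refill():
 * posts the whole array in one call instead of one call per buffer. */
static int
ring_refill(struct ring *r, struct buf **bufs, unsigned int n)
{
	unsigned int i;

	if (n > r->free_cnt)
		return -1;
	for (i = 0; i < n; i++)
		r->slots[(r->head + i) % r->size] = bufs[i];
	r->head = (r->head + n) % r->size;
	r->free_cnt -= n;
	return 0;
}

int
main(void)
{
	enum { POOL_SIZE = 64, RING_SIZE = 32 };
	static struct buf storage[POOL_SIZE];
	struct buf *free_list[POOL_SIZE];
	struct buf *slots[RING_SIZE];
	struct pool mp = { free_list, 0 };
	struct ring rx = { slots, RING_SIZE, 0, RING_SIZE };
	unsigned int i;

	for (i = 0; i < POOL_SIZE; i++)
		pool_free(&mp, &storage[i]);

	/* The refill step from the patch, in miniature: size one burst
	 * to the number of free descriptors and allocate it in a single
	 * call, instead of looping one allocation per descriptor. */
	unsigned int free_cnt = rx.free_cnt;
	struct buf *new_pkts[free_cnt];	/* VLA, as in the patch */

	if (pool_alloc_bulk(&mp, new_pkts, free_cnt) == 0) {
		if (ring_refill(&rx, new_pkts, free_cnt) != 0) {
			/* Refill failed: return the whole burst. */
			for (i = 0; i < free_cnt; i++)
				pool_free(&mp, new_pkts[i]);
		}
		printf("refilled %u descriptors in one burst\n", free_cnt);
	} else {
		/* Allocation is all-or-nothing, so the failure counter
		 * is bumped by the whole burst, mirroring
		 * dev->data->rx_mbuf_alloc_failed += free_cnt. */
		fprintf(stderr, "bulk allocation of %u buffers failed\n",
			free_cnt);
	}
	return 0;
}

The win comes from amortization: one pool round-trip per burst spreads the
per-call overhead across free_cnt buffers, rather than paying it once per
descriptor as the old while loop did.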