git.droids-corp.org - dpdk.git/commitdiff
examples/vhost: fix retry logic on Rx path
authorYuan Wang <yuanx.wang@intel.com>
Wed, 22 Jun 2022 09:25:55 +0000 (17:25 +0800)
committerMaxime Coquelin <maxime.coquelin@redhat.com>
Fri, 1 Jul 2022 13:49:49 +0000 (15:49 +0200)
drain_eth_rx() uses rte_vhost_avail_entries() to calculate
the available entries to determine if a retry is required.
However, this function only works with split rings; calling it
on packed rings returns a wrong value, causing unnecessary
retries and resulting in a significant performance penalty.

This patch fixes that by using the difference between the Rx burst
count and the enqueue burst count as the retry condition.

Fixes: be800696c26e ("examples/vhost: use burst enqueue and dequeue from lib")
Cc: stable@dpdk.org
Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
Tested-by: Wei Ling <weix.ling@intel.com>
examples/vhost/main.c

index a679ef738c08a1a8d61a17b7bb035c11b0a1a217..2a4da0e5f54f480725f85ab1bd8378ecca9ace0d 100644 (file)
@@ -634,7 +634,7 @@ us_vhost_usage(const char *prgname)
 {
        RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
        "               --vm2vm [0|1|2]\n"
-       "               --rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
+       "               --rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
        "               --socket-file <path>\n"
        "               --nb-devices ND\n"
        "               -p PORTMASK: Set mask for ports to be used by application\n"
@@ -1383,27 +1383,21 @@ drain_eth_rx(struct vhost_dev *vdev)
        if (!rx_count)
                return;
 
-       /*
-        * When "enable_retry" is set, here we wait and retry when there
-        * is no enough free slots in the queue to hold @rx_count packets,
-        * to diminish packet loss.
-        */
-       if (enable_retry &&
-           unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
-                       VIRTIO_RXQ))) {
-               uint32_t retry;
+       enqueue_count = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
+                                               VIRTIO_RXQ, pkts, rx_count);
 
-               for (retry = 0; retry < burst_rx_retry_num; retry++) {
+       /* Retry if necessary */
+       if (enable_retry && unlikely(enqueue_count < rx_count)) {
+               uint32_t retry = 0;
+
+               while (enqueue_count < rx_count && retry++ < burst_rx_retry_num) {
                        rte_delay_us(burst_rx_delay_time);
-                       if (rx_count <= rte_vhost_avail_entries(vdev->vid,
-                                       VIRTIO_RXQ))
-                               break;
+                       enqueue_count += vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
+                                                       VIRTIO_RXQ, &pkts[enqueue_count],
+                                                       rx_count - enqueue_count);
                }
        }
 
-       enqueue_count = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
-                                       VIRTIO_RXQ, pkts, rx_count);
-
        if (enable_stats) {
                __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
                                __ATOMIC_SEQ_CST);