net/virtio: fix refill order in packed ring datapath
author Cheng Jiang <cheng1.jiang@intel.com>
Thu, 8 Jul 2021 09:58:01 +0000 (09:58 +0000)
committer Chenbo Xia <chenbo.xia@intel.com>
Wed, 21 Jul 2021 05:56:13 +0000 (07:56 +0200)
The front-end should refill the descriptor with the mbuf indicated by
the buff_id rather than the index of the used descriptor. The back-end
may return buffers out of order if async copy mode is enabled.

When initializing the rxq, refill the descriptors in order, as buff_id
is not available at that time.

Fixes: a76290c8f1cf ("net/virtio: implement Rx path for packed queues")
Cc: stable@dpdk.org
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Signed-off-by: Marvin Liu <yong.liu@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
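
For context, here is a minimal standalone sketch of the indexing problem
this fix addresses. The types and names below are hypothetical
simplifications, not the DPDK structures: a packed-ring used descriptor
carries the id of the buffer the back-end actually consumed, and with
out-of-order completion that id no longer matches the ring slot, so the
per-buffer driver state (vq_descx in the driver) must be indexed by that
id rather than by the slot.

/* sketch.c: hypothetical, simplified model of the packed-ring refill. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4

struct desc { uint16_t id; };          /* used descriptor: carries buffer id */
struct desc_extra { void *cookie; };   /* per-buffer driver state */

int main(void)
{
        struct desc ring[RING_SIZE];
        struct desc_extra descx[RING_SIZE];
        const char *mbufs[RING_SIZE] = { "mbuf0", "mbuf1", "mbuf2", "mbuf3" };
        /* Async copy mode: back-end completed buffers out of order. */
        const uint16_t completed[RING_SIZE] = { 2, 0, 3, 1 };
        uint16_t idx;

        for (idx = 0; idx < RING_SIZE; idx++)
                ring[idx].id = completed[idx];

        for (idx = 0; idx < RING_SIZE; idx++) {
                /* Buggy pattern: descx[idx] binds state to the slot.
                 * Fixed pattern: follow the id the back-end wrote. */
                uint16_t did = ring[idx].id;
                descx[did].cookie = (void *)mbufs[did];
                printf("slot %u -> buffer id %u (%s)\n",
                       idx, did, (const char *)descx[did].cookie);
        }
        return 0;
}

At queue setup time the ring carries no ids from the device yet, every
slot i effectively holds id i, which is why the new in-order _init
variant in the diff below remains correct.
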
drivers/net/virtio/virtio_rxtx.c

index 34108fb..f70644b 100644
@@ -328,13 +328,35 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
        return 0;
 }
 
+static inline void
+virtqueue_refill_single_packed(struct virtqueue *vq,
+                              struct vring_packed_desc *dp,
+                              struct rte_mbuf *cookie)
+{
+       uint16_t flags = vq->vq_packed.cached_flags;
+       struct virtio_hw *hw = vq->hw;
+
+       dp->addr = cookie->buf_iova +
+                       RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+       dp->len = cookie->buf_len -
+               RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+
+       virtqueue_store_flags_packed(dp, flags,
+                                    hw->weak_barriers);
+
+       if (++vq->vq_avail_idx >= vq->vq_nentries) {
+               vq->vq_avail_idx -= vq->vq_nentries;
+               vq->vq_packed.cached_flags ^=
+                       VRING_PACKED_DESC_F_AVAIL_USED;
+               flags = vq->vq_packed.cached_flags;
+       }
+}
+
 static inline int
-virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
                                     struct rte_mbuf **cookie, uint16_t num)
 {
        struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
-       uint16_t flags = vq->vq_packed.cached_flags;
-       struct virtio_hw *hw = vq->hw;
        struct vq_desc_extra *dxp;
        uint16_t idx;
        int i;
@@ -350,24 +372,34 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
                dxp->cookie = (void *)cookie[i];
                dxp->ndescs = 1;
 
-               start_dp[idx].addr = cookie[i]->buf_iova +
-                       RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-               start_dp[idx].len = cookie[i]->buf_len -
-                       RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+               virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
+       }
+       vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+       return 0;
+}
 
-               vq->vq_desc_head_idx = dxp->next;
-               if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-                       vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
+static inline int
+virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+                                    struct rte_mbuf **cookie, uint16_t num)
+{
+       struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
+       struct vq_desc_extra *dxp;
+       uint16_t idx, did;
+       int i;
 
-               virtqueue_store_flags_packed(&start_dp[idx], flags,
-                                            hw->weak_barriers);
+       if (unlikely(vq->vq_free_cnt == 0))
+               return -ENOSPC;
+       if (unlikely(vq->vq_free_cnt < num))
+               return -EMSGSIZE;
 
-               if (++vq->vq_avail_idx >= vq->vq_nentries) {
-                       vq->vq_avail_idx -= vq->vq_nentries;
-                       vq->vq_packed.cached_flags ^=
-                               VRING_PACKED_DESC_F_AVAIL_USED;
-                       flags = vq->vq_packed.cached_flags;
-               }
+       for (i = 0; i < num; i++) {
+               idx = vq->vq_avail_idx;
+               did = start_dp[idx].id;
+               dxp = &vq->vq_descx[did];
+               dxp->cookie = (void *)cookie[i];
+               dxp->ndescs = 1;
+
+               virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
        }
        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
        return 0;
@@ -740,7 +772,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 
                        /* Enqueue allocated buffers */
                        if (virtio_with_packed_queue(vq->hw))
-                               error = virtqueue_enqueue_recv_refill_packed(vq,
+                               error = virtqueue_enqueue_recv_refill_packed_init(vq,
                                                &m, 1);
                        else
                                error = virtqueue_enqueue_recv_refill(vq,
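
As a companion note on the helper factored out above:
virtqueue_refill_single_packed keeps the original wrap-around handling,
toggling the cached AVAIL/USED flag bits each time the avail index wraps
so the device can distinguish descriptors of the new ring pass from
stale ones. Below is a minimal standalone sketch of that toggling,
assuming the standard packed-ring bit positions (AVAIL is bit 7, USED is
bit 15 of the descriptor flags, mirrored here by hypothetical F_* macros
standing in for VRING_PACKED_DESC_F_AVAIL/USED):

#include <stdint.h>
#include <stdio.h>

#define F_AVAIL      (UINT16_C(1) << 7)   /* mirrors VRING_PACKED_DESC_F_AVAIL */
#define F_USED       (UINT16_C(1) << 15)  /* mirrors VRING_PACKED_DESC_F_USED  */
#define F_AVAIL_USED (F_AVAIL | F_USED)

int main(void)
{
        const uint16_t nentries = 4;      /* hypothetical ring size */
        uint16_t avail_idx = 0;
        uint16_t cached_flags = F_AVAIL;  /* first pass: AVAIL=1, USED=0 */
        int i;

        for (i = 0; i < 10; i++) {
                printf("slot %u written with flags 0x%04x\n",
                       avail_idx, cached_flags);
                if (++avail_idx >= nentries) {
                        avail_idx -= nentries;
                        /* Flip both bits on each wrap; descriptors of the
                         * new pass now differ from those of the last one. */
                        cached_flags ^= F_AVAIL_USED;
                }
        }
        return 0;
}

One observation on the helper as committed: the trailing
"flags = vq->vq_packed.cached_flags;" inside the wrap branch is a dead
store, since flags is a local that is reloaded from cached_flags on the
next call; it is harmless but has no effect.
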