vhost: fix batch enqueue handling only a few packets
[dpdk.git] / lib/librte_vhost/virtio_net.c
index ab67269..21c3117 100644
@@ -31,6 +31,12 @@ rxvq_is_mergeable(struct virtio_net *dev)
        return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
 }
 
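+/*
+ * True when VIRTIO_F_IN_ORDER was negotiated: the driver uses descriptors
+ * in ring order, so a whole run of buffers can be marked used with a
+ * single used element carrying the id of the last buffer.
+ */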
+static __rte_always_inline bool
+virtio_net_is_inorder(struct virtio_net *dev)
+{
+       return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
+}
+
 static bool
 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
 {
@@ -201,6 +207,24 @@ vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
 }
 
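+/*
+ * In-order variant of the batch dequeue shadow update: a single shadow
+ * used element holding the last id of the batch is enough, while
+ * last_used_idx still advances by PACKED_BATCH_SIZE.
+ */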
+static __rte_always_inline void
+vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
+                                         uint16_t id)
+{
+       vq->shadow_used_packed[0].id = id;
+
+       if (!vq->shadow_used_idx) {
+               vq->shadow_last_used_idx = vq->last_used_idx;
+               vq->shadow_used_packed[0].flags =
+                       PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
+               vq->shadow_used_packed[0].len = 0;
+               vq->shadow_used_packed[0].count = 1;
+               vq->shadow_used_idx++;
+       }
+
+       vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
+}
+
 static __rte_always_inline void
 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq,
@@ -273,6 +297,34 @@ vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
        vq_inc_last_used_packed(vq, count);
 }
 
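+/*
+ * In-order variant of the single dequeue shadow update: slot 0 always
+ * tracks the id of the latest completed buffer; flags and len are only
+ * filled in for the first completion after a flush, since returning that
+ * one element releases every buffer up to the recorded id.
+ */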
+static __rte_always_inline void
+vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
+                                          uint16_t buf_id,
+                                          uint16_t count)
+{
+       uint16_t flags;
+
+       vq->shadow_used_packed[0].id = buf_id;
+
+       flags = vq->desc_packed[vq->last_used_idx].flags;
+       if (vq->used_wrap_counter) {
+               flags |= VRING_DESC_F_USED;
+               flags |= VRING_DESC_F_AVAIL;
+       } else {
+               flags &= ~VRING_DESC_F_USED;
+               flags &= ~VRING_DESC_F_AVAIL;
+       }
+
+       if (!vq->shadow_used_idx) {
+               vq->shadow_last_used_idx = vq->last_used_idx;
+               vq->shadow_used_packed[0].len = 0;
+               vq->shadow_used_packed[0].flags = flags;
+               vq->shadow_used_idx++;
+       }
+
+       vq_inc_last_used_packed(vq, count);
+}
+
 static inline void
 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
@@ -393,6 +445,7 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
 
                ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
                                                   m_buf->l2_len);
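+               /*
+                * rte_ipv4_cksum() checksums the header as-is, so the
+                * checksum field has to be cleared before recomputing it.
+                */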
+               ipv4_hdr->hdr_checksum = 0;
                ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
        }
 
@@ -1055,6 +1108,10 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
                           pkts[i]->pkt_len);
        }
 
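+       /*
+        * Log the guest pages written by the batched copies so that
+        * dirty-page tracking (live migration) sees the enqueued buffers.
+        */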
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+               vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
+                                          lens[i]);
+
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                ids[i] = descs[avail_idx + i].id;
 
@@ -1102,7 +1159,8 @@ virtio_dev_rx_packed(struct virtio_net *dev,
                rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
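+               /*
+                * Hand the batch routine the mbufs at the current offset;
+                * passing pkts itself made every batch start over from
+                * pkts[0], so only the first few packets were enqueued.
+                */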
                if (remained >= PACKED_BATCH_SIZE) {
-                       if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) {
+                       if (!virtio_dev_rx_batch_packed(dev, vq,
+                                                       &pkts[pkt_idx])) {
                                pkt_idx += PACKED_BATCH_SIZE;
                                remained -= PACKED_BATCH_SIZE;
                                continue;
@@ -1599,8 +1657,11 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
 {
        struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
 
-       if (unlikely(pkt == NULL))
+       if (unlikely(pkt == NULL)) {
+               RTE_LOG(ERR, VHOST_DATA,
+                       "Failed to allocate memory for mbuf.\n");
                return NULL;
+       }
 
        if (rte_pktmbuf_tailroom(pkt) >= data_len)
                return pkt;
@@ -1844,7 +1905,11 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
                           (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
                           pkts[i]->pkt_len);
 
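+       /*
+        * With in-order completion, shadowing only the last id of the
+        * batch is enough to return all PACKED_BATCH_SIZE buffers.
+        */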
-       vhost_shadow_dequeue_batch_packed(dev, vq, ids);
+       if (virtio_net_is_inorder(dev))
+               vhost_shadow_dequeue_batch_packed_inorder(vq,
+                       ids[PACKED_BATCH_SIZE - 1]);
+       else
+               vhost_shadow_dequeue_batch_packed(dev, vq, ids);
 
        vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
 
@@ -1901,7 +1966,11 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
                                        &desc_count))
                return -1;
 
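+       /* Same split as the batch path: in-order devices take the lighter
+        * single-slot shadow update. */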
-       vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
+       if (virtio_net_is_inorder(dev))
+               vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
+                                                          desc_count);
+       else
+               vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
 
        vq_inc_last_avail_packed(vq, desc_count);