return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}
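+/* Tell whether the guest driver negotiated VIRTIO_F_IN_ORDER. */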
+static __rte_always_inline bool
+virtio_net_is_inorder(struct virtio_net *dev)
+{
+	return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
+}
+
static bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}
+static __rte_always_inline void
+vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
+					   uint16_t id)
+{
+	vq->shadow_used_packed[0].id = id;
+
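+	/*
+	 * In-order devices only need the id of the last used descriptor,
+	 * so a single shadow entry is enough: reserve it on the first
+	 * dequeue and keep refreshing its id afterwards.
+	 */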
+	if (!vq->shadow_used_idx) {
+		vq->shadow_last_used_idx = vq->last_used_idx;
+		vq->shadow_used_packed[0].flags =
+			PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
+		vq->shadow_used_packed[0].len = 0;
+		vq->shadow_used_packed[0].count = 1;
+		vq->shadow_used_idx++;
+	}
+
+	vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
+}
+
static __rte_always_inline void
vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
vq_inc_last_used_packed(vq, count);
}
+static __rte_always_inline void
+vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
+					    uint16_t buf_id,
+					    uint16_t count)
+{
+	uint16_t flags;
+
+	vq->shadow_used_packed[0].id = buf_id;
+
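+	/* Mirror the current wrap counter into the used flags. */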
+	flags = vq->desc_packed[vq->last_used_idx].flags;
+	if (vq->used_wrap_counter) {
+		flags |= VRING_DESC_F_USED;
+		flags |= VRING_DESC_F_AVAIL;
+	} else {
+		flags &= ~VRING_DESC_F_USED;
+		flags &= ~VRING_DESC_F_AVAIL;
+	}
+
+	if (!vq->shadow_used_idx) {
+		vq->shadow_last_used_idx = vq->last_used_idx;
+		vq->shadow_used_packed[0].len = 0;
+		vq->shadow_used_packed[0].flags = flags;
+		vq->shadow_used_idx++;
+	}
+
+	vq_inc_last_used_packed(vq, count);
+}
+
static inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
m_buf->l2_len);
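+		/* rte_ipv4_cksum() sums the whole header, so the stale
+		 * checksum must be cleared before recomputing it.
+		 */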
+		ipv4_hdr->hdr_checksum = 0;
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
}
pkts[i]->pkt_len);
}
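+	/* Log the guest buffers written by this batch (dirty page tracking). */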
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
+					   lens[i]);
+
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
ids[i] = descs[avail_idx + i].id;
rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
if (remained >= PACKED_BATCH_SIZE) {
-			if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) {
+			if (!virtio_dev_rx_batch_packed(dev, vq,
+							&pkts[pkt_idx])) {
pkt_idx += PACKED_BATCH_SIZE;
remained -= PACKED_BATCH_SIZE;
continue;
{
struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
-	if (unlikely(pkt == NULL))
+	if (unlikely(pkt == NULL)) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"Failed to allocate memory for mbuf.\n");
		return NULL;
+	}
if (rte_pktmbuf_tailroom(pkt) >= data_len)
return pkt;
(void *)(uintptr_t)(desc_addrs[i] + buf_offset),
pkts[i]->pkt_len);
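+	/* In-order devices only need the id of the batch's last descriptor. */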
-	vhost_shadow_dequeue_batch_packed(dev, vq, ids);
+	if (virtio_net_is_inorder(dev))
+		vhost_shadow_dequeue_batch_packed_inorder(vq,
+				ids[PACKED_BATCH_SIZE - 1]);
+	else
+		vhost_shadow_dequeue_batch_packed(dev, vq, ids);
vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
&desc_count))
return -1;
-	vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
+	if (virtio_net_is_inorder(dev))
+		vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
+				desc_count);
+	else
+		vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
vq_inc_last_avail_packed(vq, desc_count);