ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
m_buf->l2_len);
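+ /* the checksum field must be zero while rte_ipv4_cksum() computes the new value */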
+ ipv4_hdr->hdr_checksum = 0;
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
}
else
hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
- VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
+ VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
dev->vid, num_buffers);
if (unlikely(buf_len < dev->vhost_hlen)) {
if (unlikely(reserve_avail_buf_split(dev, vq,
pkt_len, buf_vec, &num_buffers,
avail_head, &nr_vec) < 0)) {
- VHOST_LOG_DEBUG(VHOST_DATA,
+ VHOST_LOG_DATA(DEBUG,
"(%d) failed to get enough desc from vring\n",
dev->vid);
vq->shadow_used_idx -= num_buffers;
break;
}
- VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+ VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
dev->vid, vq->last_avail_idx,
vq->last_avail_idx + num_buffers);
pkts[i]->pkt_len);
}
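+ /* log the guest pages dirtied by this batch for live-migration dirty tracking */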
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
+ lens[i]);
+
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
ids[i] = descs[avail_idx + i].id;
rte_smp_rmb();
if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
&nr_descs) < 0)) {
- VHOST_LOG_DEBUG(VHOST_DATA,
+ VHOST_LOG_DATA(DEBUG,
"(%d) failed to get enough desc from vring\n",
dev->vid);
return -1;
}
- VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+ VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
dev->vid, vq->last_avail_idx,
vq->last_avail_idx + nr_descs);
rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
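+ /* try the batched enqueue path first, fall back to single packets otherwise */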
if (remained >= PACKED_BATCH_SIZE) {
- if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) {
+ if (!virtio_dev_rx_batch_packed(dev, vq,
+ &pkts[pkt_idx])) {
pkt_idx += PACKED_BATCH_SIZE;
remained -= PACKED_BATCH_SIZE;
continue;
struct vhost_virtqueue *vq;
uint32_t nb_tx = 0;
- VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
dev->vid, __func__, queue_id);
return 0;
}
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- RTE_LOG(ERR, VHOST_DATA,
+ VHOST_LOG_DATA(ERR,
"(%d) %s: built-in vhost net backend is disabled.\n",
dev->vid, __func__);
return 0;
m->l4_len = sizeof(struct rte_udp_hdr);
break;
default:
- RTE_LOG(WARNING, VHOST_DATA,
+ VHOST_LOG_DATA(WARNING,
"unsupported gso type %u.\n", hdr->gso_type);
break;
}
if (mbuf_avail == 0) {
cur = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(cur == NULL)) {
- RTE_LOG(ERR, VHOST_DATA, "Failed to "
+ VHOST_LOG_DATA(ERR, "Failed to "
"allocate memory for mbuf.\n");
error = -1;
goto out;
virtio_dev_extbuf_free, buf);
if (unlikely(shinfo == NULL)) {
rte_free(buf);
- RTE_LOG(ERR, VHOST_DATA, "Failed to init shinfo\n");
+ VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
return -1;
}
}
{
struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
- if (unlikely(pkt == NULL))
+ if (unlikely(pkt == NULL)) {
+ VHOST_LOG_DATA(ERR,
+ "Failed to allocate memory for mbuf.\n");
return NULL;
+ }
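+ /* the requested data fits in the mbuf's own tailroom */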
if (rte_pktmbuf_tailroom(pkt) >= data_len)
return pkt;
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
- VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
count = RTE_MIN(count, MAX_PKT_BURST);
count = RTE_MIN(count, free_entries);
- VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
+ VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
dev->vid, count);
for (i = 0; i < count; i++) {
*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
if (unlikely(*pkts == NULL)) {
- RTE_LOG(ERR, VHOST_DATA,
+ VHOST_LOG_DATA(ERR,
"Failed to allocate memory for mbuf.\n");
return -1;
}
return pkt_idx;
}
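+/*
+ * Check whether the descriptor after the last used one has already
+ * been made available by the driver.
+ */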
+static __rte_always_inline bool
+next_desc_is_avail(const struct vhost_virtqueue *vq)
+{
+ bool wrap_counter = vq->avail_wrap_counter;
+ uint16_t next_used_idx = vq->last_used_idx + 1;
+
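+ /* wrap back to the ring start and flip the wrap counter */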
+ if (next_used_idx >= vq->size) {
+ next_used_idx -= vq->size;
+ wrap_counter ^= 1;
+ }
+
+ return desc_is_avail(&vq->desc_packed[next_used_idx], wrap_counter);
+}
+
static __rte_noinline uint16_t
virtio_dev_tx_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
} while (remained);
- if (vq->shadow_used_idx)
+ if (vq->shadow_used_idx) {
do_data_copy_dequeue(vq);
+ if (remained && !next_desc_is_avail(vq)) {
+ /*
+ * The guest may be waiting for buffers it TXed to be
+ * used before it can enqueue more; flush and notify
+ * here to reduce that latency and avoid bufferbloat.
+ */
+ vhost_flush_dequeue_shadow_packed(dev, vq);
+ vhost_vring_call_packed(dev, vq);
+ }
+ }
+
return pkt_idx;
}
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- RTE_LOG(ERR, VHOST_DATA,
+ VHOST_LOG_DATA(ERR,
"(%d) %s: built-in vhost net backend is disabled.\n",
dev->vid, __func__);
return 0;
}
if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
- RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
dev->vid, __func__, queue_id);
return 0;
}
rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
if (rarp_mbuf == NULL) {
- RTE_LOG(ERR, VHOST_DATA,
- "Failed to make RARP packet.\n");
+ VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
count = 0;
goto out;
}