#define MAX_BATCH_LEN 256
-#define VHOST_ASYNC_BATCH_THRESHOLD 32
-
static __rte_always_inline bool
rxvq_is_mergeable(struct virtio_net *dev)
{
static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
- uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
+ uint64_t csum_l4 = m_buf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
- if (m_buf->ol_flags & PKT_TX_TCP_SEG)
- csum_l4 |= PKT_TX_TCP_CKSUM;
+ if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+ csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;
if (csum_l4) {
net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
switch (csum_l4) {
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
cksum));
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
dgram_cksum));
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
cksum));
break;
}
		/* IP cksum verification cannot be bypassed, so calculate it here */
- if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
+ if (m_buf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
struct rte_ipv4_hdr *ipv4_hdr;
			ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
					m_buf->l2_len);
			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
}
- if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
- if (m_buf->ol_flags & PKT_TX_IPV4)
+ if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+ if (m_buf->ol_flags & RTE_MBUF_F_TX_IPV4)
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
net_hdr->gso_size = m_buf->tso_segsz;
net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
+ m_buf->l4_len;
- } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
+ } else if (m_buf->ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
net_hdr->gso_size = m_buf->tso_segsz;
		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
				m_buf->l4_len;
}
static __rte_always_inline int
-copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
- struct rte_mbuf *m, struct buf_vector *buf_vec,
- uint16_t nr_vec, uint16_t num_buffers)
+async_iter_initialize(struct virtio_net *dev, struct vhost_async *async)
{
- uint32_t vec_idx = 0;
- uint32_t mbuf_offset, mbuf_avail;
- uint32_t buf_offset, buf_avail;
- uint64_t buf_addr, buf_iova, buf_len;
- uint32_t cpy_len;
- uint64_t hdr_addr;
- struct rte_mbuf *hdr_mbuf;
- struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
- struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
- int error = 0;
+ struct rte_vhost_iov_iter *iter;
- if (unlikely(m == NULL)) {
- error = -1;
- goto out;
- }
-
- buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
- buf_len = buf_vec[vec_idx].buf_len;
-
- if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
- error = -1;
- goto out;
+ if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
+ VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
+ return -1;
}
- hdr_mbuf = m;
- hdr_addr = buf_addr;
- if (unlikely(buf_len < dev->vhost_hlen)) {
- memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
- hdr = &tmp_hdr;
- } else
- hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
-
- VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
- dev->vid, num_buffers);
+ iter = async->iov_iter + async->iter_idx;
+ iter->iov = async->iovec + async->iovec_idx;
+ iter->nr_segs = 0;
- if (unlikely(buf_len < dev->vhost_hlen)) {
- buf_offset = dev->vhost_hlen - buf_len;
- vec_idx++;
- buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
- buf_len = buf_vec[vec_idx].buf_len;
- buf_avail = buf_len - buf_offset;
- } else {
- buf_offset = dev->vhost_hlen;
- buf_avail = buf_len - dev->vhost_hlen;
- }
+ return 0;
+}
- mbuf_avail = rte_pktmbuf_data_len(m);
- mbuf_offset = 0;
- while (mbuf_avail != 0 || m->next != NULL) {
- /* done with current buf, get the next one */
- if (buf_avail == 0) {
- vec_idx++;
- if (unlikely(vec_idx >= nr_vec)) {
- error = -1;
- goto out;
- }
+static __rte_always_inline int
+async_iter_add_iovec(struct virtio_net *dev, struct vhost_async *async,
+ void *src, void *dst, size_t len)
+{
+ struct rte_vhost_iov_iter *iter;
+ struct rte_vhost_iovec *iovec;
- buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
- buf_len = buf_vec[vec_idx].buf_len;
+ if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
+ static bool vhost_max_async_vec_log;
- buf_offset = 0;
- buf_avail = buf_len;
+ if (!vhost_max_async_vec_log) {
+ VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
+ vhost_max_async_vec_log = true;
}
- /* done with current mbuf, get the next one */
- if (mbuf_avail == 0) {
- m = m->next;
-
- mbuf_offset = 0;
- mbuf_avail = rte_pktmbuf_data_len(m);
- }
+ return -1;
+ }
- if (hdr_addr) {
- virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
- if (rxvq_is_mergeable(dev))
- ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
- num_buffers);
+ iter = async->iov_iter + async->iter_idx;
+ iovec = async->iovec + async->iovec_idx;
- if (unlikely(hdr == &tmp_hdr)) {
- copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
- } else {
- PRINT_PACKET(dev, (uintptr_t)hdr_addr,
- dev->vhost_hlen, 0);
- vhost_log_cache_write_iova(dev, vq,
- buf_vec[0].buf_iova,
- dev->vhost_hlen);
- }
+ iovec->src_addr = src;
+ iovec->dst_addr = dst;
+ iovec->len = len;
- hdr_addr = 0;
- }
+ iter->nr_segs++;
+ async->iovec_idx++;
- cpy_len = RTE_MIN(buf_avail, mbuf_avail);
-
- if (likely(cpy_len > MAX_BATCH_LEN ||
- vq->batch_copy_nb_elems >= vq->size)) {
- rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
- rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
- cpy_len);
- vhost_log_cache_write_iova(dev, vq,
- buf_iova + buf_offset,
- cpy_len);
- PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
- cpy_len, 0);
- } else {
- batch_copy[vq->batch_copy_nb_elems].dst =
- (void *)((uintptr_t)(buf_addr + buf_offset));
- batch_copy[vq->batch_copy_nb_elems].src =
- rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
- batch_copy[vq->batch_copy_nb_elems].log_addr =
- buf_iova + buf_offset;
- batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
- vq->batch_copy_nb_elems++;
- }
+ return 0;
+}
- mbuf_avail -= cpy_len;
- mbuf_offset += cpy_len;
- buf_avail -= cpy_len;
- buf_offset += cpy_len;
- }
+static __rte_always_inline void
+async_iter_finalize(struct vhost_async *async)
+{
+ async->iter_idx++;
+}
-out:
+static __rte_always_inline void
+async_iter_cancel(struct vhost_async *async)
+{
+ struct rte_vhost_iov_iter *iter;
- return error;
+ iter = async->iov_iter + async->iter_idx;
+ async->iovec_idx -= iter->nr_segs;
+ iter->nr_segs = 0;
+ iter->iov = NULL;
}
static __rte_always_inline void
-async_fill_vec(struct iovec *v, void *base, size_t len)
+async_iter_reset(struct vhost_async *async)
{
- v->iov_base = base;
- v->iov_len = len;
+ async->iter_idx = 0;
+ async->iovec_idx = 0;
}
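
The five helpers above replace the old per-burst iovec/iterator pools with a small state machine kept in the per-virtqueue vhost_async structure. A minimal sketch of the intended call pattern, assuming an already populated struct vhost_async and using placeholder src/dst/len values (this sketch is not part of the patch), is:

/*
 * Illustrative only: one iov_iter is built per packet; a failed segment
 * rolls the iterator back with async_iter_cancel(). All addresses and
 * lengths below are placeholders.
 */
static int
build_one_pkt_iter(struct virtio_net *dev, struct vhost_virtqueue *vq,
		void *src, void *dst, size_t len)
{
	struct vhost_async *async = vq->async;

	if (async_iter_initialize(dev, async))
		return -1;

	/* typically called once per guest-physical segment of the packet */
	if (async_iter_add_iovec(dev, async, src, dst, len)) {
		async_iter_cancel(async);
		return -1;
	}

	async_iter_finalize(async);
	return 0;
}

async_iter_reset() rewinds both indexes once a burst has been handed to the transfer callback, as done in the submit paths below.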
-static __rte_always_inline void
-async_fill_iter(struct rte_vhost_iov_iter *it, size_t count,
- struct iovec *vec, unsigned long nr_seg)
+static __rte_always_inline int
+async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf *m, uint32_t mbuf_offset,
+ uint64_t buf_iova, uint32_t cpy_len)
{
- it->offset = 0;
- it->count = count;
+ struct vhost_async *async = vq->async;
+ uint64_t mapped_len;
+ uint32_t buf_offset = 0;
+ void *hpa;
- if (count) {
- it->iov = vec;
- it->nr_segs = nr_seg;
- } else {
- it->iov = 0;
- it->nr_segs = 0;
+ while (cpy_len) {
+ hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
+ buf_iova + buf_offset, cpy_len, &mapped_len);
+ if (unlikely(!hpa)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: failed to get hpa.\n", dev->ifname, __func__);
+ return -1;
+ }
+
+ if (unlikely(async_iter_add_iovec(dev, async,
+ (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
+ mbuf_offset),
+ hpa, (size_t)mapped_len)))
+ return -1;
+
+ cpy_len -= (uint32_t)mapped_len;
+ mbuf_offset += (uint32_t)mapped_len;
+ buf_offset += (uint32_t)mapped_len;
}
+
+ return 0;
}
static __rte_always_inline void
-async_fill_desc(struct rte_vhost_async_desc *desc,
- struct rte_vhost_iov_iter *src, struct rte_vhost_iov_iter *dst)
+sync_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf *m, uint32_t mbuf_offset,
+ uint64_t buf_addr, uint64_t buf_iova, uint32_t cpy_len)
{
- desc->src = src;
- desc->dst = dst;
+ struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
+
+ if (likely(cpy_len > MAX_BATCH_LEN || vq->batch_copy_nb_elems >= vq->size)) {
+ rte_memcpy((void *)((uintptr_t)(buf_addr)),
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+ cpy_len);
+ vhost_log_cache_write_iova(dev, vq, buf_iova, cpy_len);
+ PRINT_PACKET(dev, (uintptr_t)(buf_addr), cpy_len, 0);
+ } else {
+ batch_copy[vq->batch_copy_nb_elems].dst =
+ (void *)((uintptr_t)(buf_addr));
+ batch_copy[vq->batch_copy_nb_elems].src =
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
+ batch_copy[vq->batch_copy_nb_elems].log_addr = buf_iova;
+ batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
+ vq->batch_copy_nb_elems++;
+ }
}
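
sync_mbuf_to_desc_seg() keeps the existing batched-copy optimization: copies larger than MAX_BATCH_LEN (256 bytes), or arriving when the batch array is already full, are performed immediately, while small copies are queued in vq->batch_copy_elems and flushed in one pass later. The flush helper is outside this hunk; a minimal sketch of such a pass, using only the fields visible above, might look like:

/*
 * Illustrative flush of the deferred copies queued above (the in-tree
 * helper performing this is not shown in this hunk; field names follow
 * batch_copy_elem as used in sync_mbuf_to_desc_seg()).
 */
static void
flush_batch_copies(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t i;

	for (i = 0; i < vq->batch_copy_nb_elems; i++) {
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
		vhost_log_cache_write_iova(dev, vq, elem[i].log_addr, elem[i].len);
	}

	vq->batch_copy_nb_elems = 0;
}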
static __rte_always_inline int
-async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
- struct rte_mbuf *m, struct buf_vector *buf_vec,
- uint16_t nr_vec, uint16_t num_buffers,
- struct iovec *src_iovec, struct iovec *dst_iovec,
- struct rte_vhost_iov_iter *src_it,
- struct rte_vhost_iov_iter *dst_it)
+mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf *m, struct buf_vector *buf_vec,
+ uint16_t nr_vec, uint16_t num_buffers, bool is_async)
{
- struct rte_mbuf *hdr_mbuf;
- struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
- uint64_t buf_addr, buf_iova;
- uint64_t hdr_addr;
- uint64_t mapped_len;
uint32_t vec_idx = 0;
uint32_t mbuf_offset, mbuf_avail;
uint32_t buf_offset, buf_avail;
- uint32_t cpy_len, buf_len;
- int error = 0;
-
- uint32_t tlen = 0;
- int tvec_idx = 0;
- void *hpa;
+ uint64_t buf_addr, buf_iova, buf_len;
+ uint32_t cpy_len;
+ uint64_t hdr_addr;
+ struct rte_mbuf *hdr_mbuf;
+ struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
+ struct vhost_async *async = vq->async;
- if (unlikely(m == NULL)) {
- error = -1;
- goto out;
- }
+ if (unlikely(m == NULL))
+ return -1;
buf_addr = buf_vec[vec_idx].buf_addr;
buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
- if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
- error = -1;
- goto out;
- }
+ if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1))
+ return -1;
hdr_mbuf = m;
hdr_addr = buf_addr;
} else
hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
- VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
- dev->vid, num_buffers);
+ VHOST_LOG_DATA(DEBUG, "(%s) RX: num merge buffers %d\n",
+ dev->ifname, num_buffers);
if (unlikely(buf_len < dev->vhost_hlen)) {
buf_offset = dev->vhost_hlen - buf_len;
mbuf_avail = rte_pktmbuf_data_len(m);
mbuf_offset = 0;
+ if (is_async) {
+ if (async_iter_initialize(dev, async))
+ return -1;
+ }
+
while (mbuf_avail != 0 || m->next != NULL) {
/* done with current buf, get the next one */
if (buf_avail == 0) {
vec_idx++;
- if (unlikely(vec_idx >= nr_vec)) {
- error = -1;
- goto out;
- }
+ if (unlikely(vec_idx >= nr_vec))
+ goto error;
buf_addr = buf_vec[vec_idx].buf_addr;
buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
buf_offset = 0;
- buf_avail = buf_len;
+ buf_avail = buf_len;
}
/* done with current mbuf, get the next one */
m = m->next;
mbuf_offset = 0;
- mbuf_avail = rte_pktmbuf_data_len(m);
+ mbuf_avail = rte_pktmbuf_data_len(m);
}
if (hdr_addr) {
cpy_len = RTE_MIN(buf_avail, mbuf_avail);
- while (unlikely(cpy_len)) {
- hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
- buf_iova + buf_offset,
- cpy_len, &mapped_len);
- if (unlikely(!hpa)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: failed to get hpa.\n",
- dev->vid, __func__);
- error = -1;
- goto out;
- }
-
- async_fill_vec(src_iovec + tvec_idx,
- (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
- mbuf_offset), (size_t)mapped_len);
- async_fill_vec(dst_iovec + tvec_idx,
- hpa, (size_t)mapped_len);
-
- tlen += (uint32_t)mapped_len;
- cpy_len -= (uint32_t)mapped_len;
- mbuf_avail -= (uint32_t)mapped_len;
- mbuf_offset += (uint32_t)mapped_len;
- buf_avail -= (uint32_t)mapped_len;
- buf_offset += (uint32_t)mapped_len;
- tvec_idx++;
+ if (is_async) {
+ if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
+ buf_iova + buf_offset, cpy_len) < 0)
+ goto error;
+ } else {
+ sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
+ buf_addr + buf_offset,
+ buf_iova + buf_offset, cpy_len);
}
+
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ buf_avail -= cpy_len;
+ buf_offset += cpy_len;
}
- async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
- async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
-out:
- return error;
+ if (is_async)
+ async_iter_finalize(async);
+
+ return 0;
+error:
+ if (is_async)
+ async_iter_cancel(async);
+
+ return -1;
}
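
Note that is_async is a compile-time constant at every call site of mbuf_to_desc() shown below (false in the sync enqueue paths, true in the async submit paths), so with __rte_always_inline the compiler specializes the function and the unified body does not add a runtime branch to the hot loop.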
static __rte_always_inline int
avail_idx -= vq->size;
}
- if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
+ if (mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers, false) < 0)
return -1;
vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
pkt_len, buf_vec, &num_buffers,
avail_head, &nr_vec) < 0)) {
VHOST_LOG_DATA(DEBUG,
- "(%d) failed to get enough desc from vring\n",
- dev->vid);
+ "(%s) failed to get enough desc from vring\n",
+ dev->ifname);
vq->shadow_used_idx -= num_buffers;
break;
}
- VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
- dev->vid, vq->last_avail_idx,
+ VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
+ dev->ifname, vq->last_avail_idx,
vq->last_avail_idx + num_buffers);
- if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
- buf_vec, nr_vec,
- num_buffers) < 0) {
+ if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec,
+ num_buffers, false) < 0) {
vq->shadow_used_idx -= num_buffers;
break;
}
if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
&nr_descs) < 0)) {
- VHOST_LOG_DATA(DEBUG,
- "(%d) failed to get enough desc from vring\n",
- dev->vid);
+ VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
+ dev->ifname);
return -1;
}
- VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
- dev->vid, vq->last_avail_idx,
+ VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
+ dev->ifname, vq->last_avail_idx,
vq->last_avail_idx + nr_descs);
vq_inc_last_avail_packed(vq, nr_descs);
struct vhost_virtqueue *vq;
uint32_t nb_tx = 0;
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
+ VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
+ dev->ifname, __func__);
return 0;
}
}
static __rte_always_inline uint16_t
-virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
- uint16_t vq_size, uint16_t n_inflight)
+async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
{
- return pkts_idx > n_inflight ? (pkts_idx - n_inflight) :
- (vq_size - n_inflight + pkts_idx) % vq_size;
+ struct vhost_async *async = vq->async;
+
+ if (async->pkts_idx >= async->pkts_inflight_n)
+ return async->pkts_idx - async->pkts_inflight_n;
+ else
+ return vq->size - async->pkts_inflight_n + async->pkts_idx;
}
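
async_get_first_inflight_pkt_idx() replaces the old virtio_dev_rx_async_get_info_idx() helper: it walks back pkts_inflight_n slots from pkts_idx, wrapping at vq->size. As an illustrative example, with vq->size = 256, pkts_idx = 5 and pkts_inflight_n = 12, the second branch is taken and the oldest in-flight packet sits at slot 256 - 12 + 5 = 249; vhost_poll_enqueue_completed() below then consumes completions at (249 + i) % 256.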
static __rte_always_inline void
struct rte_mbuf **pkts, uint32_t count)
{
struct buf_vector buf_vec[BUF_VECTOR_MAX];
- uint32_t pkt_idx = 0, pkt_burst_idx = 0;
+ uint32_t pkt_idx = 0;
uint16_t num_buffers;
uint16_t avail_head;
- struct rte_vhost_iov_iter *it_pool = vq->it_pool;
- struct iovec *vec_pool = vq->vec_pool;
- struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
- struct iovec *src_iovec = vec_pool;
- struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
- struct async_inflight_info *pkts_info = vq->async_pkts_info;
- uint32_t n_pkts = 0, pkt_err = 0;
+ struct vhost_async *async = vq->async;
+ struct async_inflight_info *pkts_info = async->pkts_info;
+ uint32_t pkt_err = 0;
int32_t n_xfer;
- uint16_t segs_await = 0;
- uint16_t iovec_idx = 0, it_idx = 0, slot_idx = 0;
+ uint16_t slot_idx = 0;
/*
* The ordering between avail index and desc reads need to be enforced.
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+ async_iter_reset(async);
+
for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
uint16_t nr_vec = 0;
- if (unlikely(reserve_avail_buf_split(dev, vq,
- pkt_len, buf_vec, &num_buffers,
- avail_head, &nr_vec) < 0)) {
- VHOST_LOG_DATA(DEBUG,
- "(%d) failed to get enough desc from vring\n",
- dev->vid);
+ if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
+ &num_buffers, avail_head, &nr_vec) < 0)) {
+ VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
+ dev->ifname);
vq->shadow_used_idx -= num_buffers;
break;
}
- VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
- dev->vid, vq->last_avail_idx,
- vq->last_avail_idx + num_buffers);
+ VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
+ dev->ifname, vq->last_avail_idx, vq->last_avail_idx + num_buffers);
- if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers,
- &src_iovec[iovec_idx], &dst_iovec[iovec_idx],
- &it_pool[it_idx], &it_pool[it_idx + 1]) < 0) {
+ if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers, true) < 0) {
vq->shadow_used_idx -= num_buffers;
break;
}
- async_fill_desc(&tdes[pkt_burst_idx++], &it_pool[it_idx],
- &it_pool[it_idx + 1]);
-
- slot_idx = (vq->async_pkts_idx + pkt_idx) & (vq->size - 1);
+ slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
pkts_info[slot_idx].descs = num_buffers;
pkts_info[slot_idx].mbuf = pkts[pkt_idx];
- iovec_idx += it_pool[it_idx].nr_segs;
- segs_await += it_pool[it_idx].nr_segs;
- it_idx += 2;
-
vq->last_avail_idx += num_buffers;
-
- /*
- * conditions to trigger async device transfer:
- * - buffered packet number reaches transfer threshold
- * - unused async iov number is less than max vhost vector
- */
- if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
- ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
- BUF_VECTOR_MAX))) {
- n_xfer = vq->async_ops.transfer_data(dev->vid,
- queue_id, tdes, 0, pkt_burst_idx);
- if (likely(n_xfer >= 0)) {
- n_pkts = n_xfer;
- } else {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: failed to transfer data for queue id %d.\n",
- dev->vid, __func__, queue_id);
- n_pkts = 0;
- }
-
- iovec_idx = 0;
- it_idx = 0;
- segs_await = 0;
-
- if (unlikely(n_pkts < pkt_burst_idx)) {
- /*
- * log error packets number here and do actual
- * error processing when applications poll
- * completion
- */
- pkt_err = pkt_burst_idx - n_pkts;
- pkt_idx++;
- pkt_burst_idx = 0;
- break;
- }
-
- pkt_burst_idx = 0;
- }
}
- if (pkt_burst_idx) {
- n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
- if (likely(n_xfer >= 0)) {
- n_pkts = n_xfer;
- } else {
- VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
- dev->vid, __func__, queue_id);
- n_pkts = 0;
- }
+ if (unlikely(pkt_idx == 0))
+ return 0;
- if (unlikely(n_pkts < pkt_burst_idx))
- pkt_err = pkt_burst_idx - n_pkts;
+ n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
+ if (unlikely(n_xfer < 0)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: failed to transfer data for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
+ n_xfer = 0;
}
+ pkt_err = pkt_idx - n_xfer;
if (unlikely(pkt_err)) {
uint16_t num_descs = 0;
/* update number of completed packets */
- pkt_idx -= pkt_err;
+ pkt_idx = n_xfer;
/* calculate the sum of descriptors to revert */
while (pkt_err-- > 0) {
/* keep used descriptors */
if (likely(vq->shadow_used_idx)) {
- uint16_t to = vq->async_desc_idx_split & (vq->size - 1);
+ uint16_t to = async->desc_idx_split & (vq->size - 1);
store_dma_desc_info_split(vq->shadow_used_split,
- vq->async_descs_split, vq->size, 0, to,
+ async->descs_split, vq->size, 0, to,
vq->shadow_used_idx);
- vq->async_desc_idx_split += vq->shadow_used_idx;
- vq->async_pkts_idx += pkt_idx;
- vq->async_pkts_inflight_n += pkt_idx;
+ async->desc_idx_split += vq->shadow_used_idx;
+
+ async->pkts_idx += pkt_idx;
+ if (async->pkts_idx >= vq->size)
+ async->pkts_idx -= vq->size;
+
+ async->pkts_inflight_n += pkt_idx;
vq->shadow_used_idx = 0;
}
return pkt_idx;
}
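
With this rework the split submit path issues a single transfer_data call per burst, passing one rte_vhost_iov_iter per packet; the callback returns how many packets it accepted, and pkt_err = pkt_idx - n_xfer packets are rolled back from the shadow ring. Purely as an illustration of the iov_iter layout built above (not a real DMA backend), a software copy over such an array could look like:

/*
 * Illustrative only: walk an array of rte_vhost_iov_iter the way a trivial
 * CPU-copy backend might, returning the number of packets fully copied.
 * A real backend would enqueue DMA descriptors instead.
 */
static uint16_t
copy_iov_iters(struct rte_vhost_iov_iter *iters, uint16_t count)
{
	uint16_t pkt;
	unsigned long seg;

	for (pkt = 0; pkt < count; pkt++) {
		struct rte_vhost_iov_iter *it = &iters[pkt];

		for (seg = 0; seg < it->nr_segs; seg++)
			rte_memcpy(it->iov[seg].dst_addr,
					it->iov[seg].src_addr,
					it->iov[seg].len);
	}

	return count;
}

Completion accounting still happens separately through check_completed_copies, as polled in vhost_poll_enqueue_completed() further down.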
-static __rte_always_inline void
-vhost_update_used_packed(struct vhost_virtqueue *vq,
- struct vring_used_elem_packed *shadow_ring,
- uint16_t count)
-{
- int i;
- uint16_t used_idx = vq->last_used_idx;
- uint16_t head_idx = vq->last_used_idx;
- uint16_t head_flags = 0;
-
- if (count == 0)
- return;
-
- /* Split loop in two to save memory barriers */
- for (i = 0; i < count; i++) {
- vq->desc_packed[used_idx].id = shadow_ring[i].id;
- vq->desc_packed[used_idx].len = shadow_ring[i].len;
-
- used_idx += shadow_ring[i].count;
- if (used_idx >= vq->size)
- used_idx -= vq->size;
- }
-
- /* The ordering for storing desc flags needs to be enforced. */
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
-
- for (i = 0; i < count; i++) {
- uint16_t flags;
-
- if (vq->shadow_used_packed[i].len)
- flags = VRING_DESC_F_WRITE;
- else
- flags = 0;
-
- if (vq->used_wrap_counter) {
- flags |= VRING_DESC_F_USED;
- flags |= VRING_DESC_F_AVAIL;
- } else {
- flags &= ~VRING_DESC_F_USED;
- flags &= ~VRING_DESC_F_AVAIL;
- }
-
- if (i > 0) {
- vq->desc_packed[vq->last_used_idx].flags = flags;
- } else {
- head_idx = vq->last_used_idx;
- head_flags = flags;
- }
-
- vq_inc_last_used_packed(vq, shadow_ring[i].count);
- }
-
- vq->desc_packed[head_idx].flags = head_flags;
-}
static __rte_always_inline int
vhost_enqueue_async_packed(struct virtio_net *dev,
struct rte_mbuf *pkt,
struct buf_vector *buf_vec,
uint16_t *nr_descs,
- uint16_t *nr_buffers,
- struct iovec *src_iovec, struct iovec *dst_iovec,
- struct rte_vhost_iov_iter *src_it,
- struct rte_vhost_iov_iter *dst_it)
+ uint16_t *nr_buffers)
{
uint16_t nr_vec = 0;
uint16_t avail_idx = vq->last_avail_idx;
if (unlikely(++tries > max_tries))
return -1;
- if (unlikely(fill_vec_buf_packed(dev, vq, avail_idx, &desc_count, buf_vec, &nr_vec,
- &buf_id, &len, VHOST_ACCESS_RW) < 0))
+ if (unlikely(fill_vec_buf_packed(dev, vq,
+ avail_idx, &desc_count,
+ buf_vec, &nr_vec,
+ &buf_id, &len,
+ VHOST_ACCESS_RW) < 0))
return -1;
len = RTE_MIN(len, size);
avail_idx -= vq->size;
}
- if (unlikely(async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec,
- *nr_buffers, src_iovec, dst_iovec,
- src_it, dst_it) < 0))
+ if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0))
return -1;
vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
static __rte_always_inline int16_t
virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
- struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers,
- struct iovec *src_iovec, struct iovec *dst_iovec,
- struct rte_vhost_iov_iter *src_it, struct rte_vhost_iov_iter *dst_it)
+ struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
{
struct buf_vector buf_vec[BUF_VECTOR_MAX];
- if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec, nr_descs, nr_buffers,
- src_iovec, dst_iovec,
- src_it, dst_it) < 0)) {
- VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n", dev->vid);
+ if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec,
+ nr_descs, nr_buffers) < 0)) {
+ VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n", dev->ifname);
return -1;
}
- VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
- dev->vid, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
+ VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
+ dev->ifname, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
return 0;
}
{
uint16_t descs_err = 0;
uint16_t buffers_err = 0;
- struct async_inflight_info *pkts_info = vq->async_pkts_info;
+ struct async_inflight_info *pkts_info = vq->async->pkts_info;
*pkt_idx -= nr_err;
/* calculate the sum of buffers and descs of DMA-error packets. */
struct vhost_virtqueue *vq, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count)
{
- uint32_t pkt_idx = 0, pkt_burst_idx = 0;
+ uint32_t pkt_idx = 0;
uint32_t remained = count;
int32_t n_xfer;
uint16_t num_buffers;
uint16_t num_descs;
- struct rte_vhost_iov_iter *it_pool = vq->it_pool;
- struct iovec *vec_pool = vq->vec_pool;
- struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
- struct iovec *src_iovec = vec_pool;
- struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
- struct async_inflight_info *pkts_info = vq->async_pkts_info;
- uint32_t n_pkts = 0, pkt_err = 0;
+ struct vhost_async *async = vq->async;
+ struct async_inflight_info *pkts_info = async->pkts_info;
+ uint32_t pkt_err = 0;
uint16_t slot_idx = 0;
- uint16_t segs_await = 0;
- uint16_t iovec_idx = 0, it_idx = 0;
do {
rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
num_buffers = 0;
num_descs = 0;
if (unlikely(virtio_dev_rx_async_packed(dev, vq, pkts[pkt_idx],
- &num_descs, &num_buffers,
- &src_iovec[iovec_idx], &dst_iovec[iovec_idx],
- &it_pool[it_idx], &it_pool[it_idx + 1]) < 0))
+ &num_descs, &num_buffers) < 0))
break;
- slot_idx = (vq->async_pkts_idx + pkt_idx) % vq->size;
+ slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
- async_fill_desc(&tdes[pkt_burst_idx++], &it_pool[it_idx],
- &it_pool[it_idx + 1]);
pkts_info[slot_idx].descs = num_descs;
pkts_info[slot_idx].nr_buffers = num_buffers;
pkts_info[slot_idx].mbuf = pkts[pkt_idx];
- iovec_idx += it_pool[it_idx].nr_segs;
- segs_await += it_pool[it_idx].nr_segs;
- it_idx += 2;
pkt_idx++;
remained--;
vq_inc_last_avail_packed(vq, num_descs);
-
- /*
- * conditions to trigger async device transfer:
- * - buffered packet number reaches transfer threshold
- * - unused async iov number is less than max vhost vector
- */
- if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
- ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
- n_xfer = vq->async_ops.transfer_data(dev->vid,
- queue_id, tdes, 0, pkt_burst_idx);
- if (likely(n_xfer >= 0)) {
- n_pkts = n_xfer;
- } else {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: failed to transfer data for queue id %d.\n",
- dev->vid, __func__, queue_id);
- n_pkts = 0;
- }
-
- iovec_idx = 0;
- it_idx = 0;
- segs_await = 0;
-
- if (unlikely(n_pkts < pkt_burst_idx)) {
- /*
- * log error packets number here and do actual
- * error processing when applications poll
- * completion
- */
- pkt_err = pkt_burst_idx - n_pkts;
- pkt_burst_idx = 0;
- break;
- }
-
- pkt_burst_idx = 0;
- }
} while (pkt_idx < count);
- if (pkt_burst_idx) {
- n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
- if (likely(n_xfer >= 0)) {
- n_pkts = n_xfer;
- } else {
- VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
- dev->vid, __func__, queue_id);
- n_pkts = 0;
- }
+ if (unlikely(pkt_idx == 0))
+ return 0;
- if (unlikely(n_pkts < pkt_burst_idx))
- pkt_err = pkt_burst_idx - n_pkts;
+ n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
+ if (unlikely(n_xfer < 0)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: failed to transfer data for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
+ n_xfer = 0;
}
+ pkt_err = pkt_idx - n_xfer;
+
+ async_iter_reset(async);
+
if (unlikely(pkt_err))
dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
if (likely(vq->shadow_used_idx)) {
/* keep used descriptors. */
- store_dma_desc_info_packed(vq->shadow_used_packed, vq->async_buffers_packed,
- vq->size, 0, vq->async_buffer_idx_packed,
+ store_dma_desc_info_packed(vq->shadow_used_packed, async->buffers_packed,
+ vq->size, 0, async->buffer_idx_packed,
vq->shadow_used_idx);
- vq->async_buffer_idx_packed += vq->shadow_used_idx;
- if (vq->async_buffer_idx_packed >= vq->size)
- vq->async_buffer_idx_packed -= vq->size;
+ async->buffer_idx_packed += vq->shadow_used_idx;
+ if (async->buffer_idx_packed >= vq->size)
+ async->buffer_idx_packed -= vq->size;
- vq->async_pkts_idx += pkt_idx;
- if (vq->async_pkts_idx >= vq->size)
- vq->async_pkts_idx -= vq->size;
+ async->pkts_idx += pkt_idx;
+ if (async->pkts_idx >= vq->size)
+ async->pkts_idx -= vq->size;
vq->shadow_used_idx = 0;
- vq->async_pkts_inflight_n += pkt_idx;
+ async->pkts_inflight_n += pkt_idx;
}
return pkt_idx;
static __rte_always_inline void
write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
{
+ struct vhost_async *async = vq->async;
uint16_t nr_left = n_descs;
uint16_t nr_copy;
uint16_t to, from;
do {
- from = vq->last_async_desc_idx_split & (vq->size - 1);
+ from = async->last_desc_idx_split & (vq->size - 1);
nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
to = vq->last_used_idx & (vq->size - 1);
if (to + nr_copy <= vq->size) {
- rte_memcpy(&vq->used->ring[to], &vq->async_descs_split[from],
+ rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
nr_copy * sizeof(struct vring_used_elem));
} else {
uint16_t size = vq->size - to;
- rte_memcpy(&vq->used->ring[to], &vq->async_descs_split[from],
+ rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
size * sizeof(struct vring_used_elem));
- rte_memcpy(&vq->used->ring[0], &vq->async_descs_split[from + size],
+ rte_memcpy(&vq->used->ring[0], &async->descs_split[from + size],
(nr_copy - size) * sizeof(struct vring_used_elem));
}
- vq->last_async_desc_idx_split += nr_copy;
+ async->last_desc_idx_split += nr_copy;
vq->last_used_idx += nr_copy;
nr_left -= nr_copy;
} while (nr_left > 0);
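
The split write-back above copies completed descriptors from the async shadow array into the used ring in chunks, splitting at both wrap points. As an illustrative trace: with vq->size = 256, last_desc_idx_split = 250, last_used_idx = 100 and n_descs = 10, the first pass copies nr_copy = 6 entries starting at from = 250 into ring slots 100..105, then the second pass wraps the shadow index to 0 and copies the remaining 4 entries into slots 106..109; had the used-ring index been near the end instead, the inner branch would have split a single chunk into two rte_memcpy calls.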
write_back_completed_descs_packed(struct vhost_virtqueue *vq,
uint16_t n_buffers)
{
- uint16_t nr_left = n_buffers;
- uint16_t from, to;
+ struct vhost_async *async = vq->async;
+ uint16_t from = async->last_buffer_idx_packed;
+ uint16_t used_idx = vq->last_used_idx;
+ uint16_t head_idx = vq->last_used_idx;
+ uint16_t head_flags = 0;
+ uint16_t i;
- do {
- from = vq->last_async_buffer_idx_packed;
- to = (from + nr_left) % vq->size;
- if (to > from) {
- vhost_update_used_packed(vq, vq->async_buffers_packed + from, to - from);
- vq->last_async_buffer_idx_packed += nr_left;
- nr_left = 0;
+ /* Split loop in two to save memory barriers */
+ for (i = 0; i < n_buffers; i++) {
+ vq->desc_packed[used_idx].id = async->buffers_packed[from].id;
+ vq->desc_packed[used_idx].len = async->buffers_packed[from].len;
+
+ used_idx += async->buffers_packed[from].count;
+ if (used_idx >= vq->size)
+ used_idx -= vq->size;
+
+ from++;
+ if (from >= vq->size)
+ from = 0;
+ }
+
+ /* The ordering for storing desc flags needs to be enforced. */
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
+ from = async->last_buffer_idx_packed;
+
+ for (i = 0; i < n_buffers; i++) {
+ uint16_t flags;
+
+ if (async->buffers_packed[from].len)
+ flags = VRING_DESC_F_WRITE;
+ else
+ flags = 0;
+
+ if (vq->used_wrap_counter) {
+ flags |= VRING_DESC_F_USED;
+ flags |= VRING_DESC_F_AVAIL;
} else {
- vhost_update_used_packed(vq, vq->async_buffers_packed + from,
- vq->size - from);
- vq->last_async_buffer_idx_packed = 0;
- nr_left -= vq->size - from;
+ flags &= ~VRING_DESC_F_USED;
+ flags &= ~VRING_DESC_F_AVAIL;
}
- } while (nr_left > 0);
+
+ if (i > 0) {
+ vq->desc_packed[vq->last_used_idx].flags = flags;
+ } else {
+ head_idx = vq->last_used_idx;
+ head_flags = flags;
+ }
+
+ vq_inc_last_used_packed(vq, async->buffers_packed[from].count);
+
+ from++;
+ if (from == vq->size)
+ from = 0;
+ }
+
+ vq->desc_packed[head_idx].flags = head_flags;
+ async->last_buffer_idx_packed = from;
}
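
write_back_completed_descs_packed() now writes the used descriptors directly from async->buffers_packed in two passes: the first pass stores only IDs and lengths, a single release fence then orders those stores, and the second pass writes the flags, with the head descriptor's flags written last. This mirrors the removed vhost_update_used_packed() and ensures the guest never observes a descriptor marked used (or a chain head flipped) before its ID and length are visible.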
static __rte_always_inline uint16_t
vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
- struct vhost_virtqueue *vq;
- struct async_inflight_info *pkts_info;
+ struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+ struct vhost_async *async = vq->async;
+ struct async_inflight_info *pkts_info = async->pkts_info;
int32_t n_cpl;
- uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
- uint16_t start_idx, pkts_idx, vq_size;
- uint16_t from, i;
+ uint16_t n_descs = 0, n_buffers = 0;
+ uint16_t start_idx, from, i;
- vq = dev->virtqueue[queue_id];
- pkts_idx = vq->async_pkts_idx % vq->size;
- pkts_info = vq->async_pkts_info;
- vq_size = vq->size;
- start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
- vq_size, vq->async_pkts_inflight_n);
-
- if (count > vq->async_last_pkts_n) {
- n_cpl = vq->async_ops.check_completed_copies(dev->vid,
- queue_id, 0, count - vq->async_last_pkts_n);
- if (likely(n_cpl >= 0)) {
- n_pkts_cpl = n_cpl;
- } else {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: failed to check completed copies for queue id %d.\n",
- dev->vid, __func__, queue_id);
- n_pkts_cpl = 0;
- }
+ n_cpl = async->ops.check_completed_copies(dev->vid, queue_id, 0, count);
+ if (unlikely(n_cpl < 0)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: failed to check completed copies for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
+ return 0;
}
- n_pkts_cpl += vq->async_last_pkts_n;
- n_pkts_put = RTE_MIN(n_pkts_cpl, count);
- if (unlikely(n_pkts_put == 0)) {
- vq->async_last_pkts_n = n_pkts_cpl;
+ if (n_cpl == 0)
return 0;
- }
- if (vq_is_packed(dev)) {
- for (i = 0; i < n_pkts_put; i++) {
- from = (start_idx + i) % vq_size;
- n_buffers += pkts_info[from].nr_buffers;
- pkts[i] = pkts_info[from].mbuf;
- }
- } else {
- for (i = 0; i < n_pkts_put; i++) {
- from = (start_idx + i) & (vq_size - 1);
- n_descs += pkts_info[from].descs;
- pkts[i] = pkts_info[from].mbuf;
- }
+ start_idx = async_get_first_inflight_pkt_idx(vq);
+
+ for (i = 0; i < n_cpl; i++) {
+ from = (start_idx + i) % vq->size;
+ /* Only used with packed ring */
+ n_buffers += pkts_info[from].nr_buffers;
+ /* Only used with split ring */
+ n_descs += pkts_info[from].descs;
+ pkts[i] = pkts_info[from].mbuf;
}
- vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
- vq->async_pkts_inflight_n -= n_pkts_put;
+
+ async->pkts_inflight_n -= n_cpl;
if (likely(vq->enabled && vq->access_ok)) {
if (vq_is_packed(dev)) {
write_back_completed_descs_packed(vq, n_buffers);
-
vhost_vring_call_packed(dev, vq);
} else {
write_back_completed_descs_split(vq, n_descs);
-
- __atomic_add_fetch(&vq->used->idx, n_descs,
- __ATOMIC_RELEASE);
+ __atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
vhost_vring_call_split(dev, vq);
}
} else {
if (vq_is_packed(dev)) {
- vq->last_async_buffer_idx_packed += n_buffers;
- if (vq->last_async_buffer_idx_packed >= vq->size)
- vq->last_async_buffer_idx_packed -= vq->size;
+ async->last_buffer_idx_packed += n_buffers;
+ if (async->last_buffer_idx_packed >= vq->size)
+ async->last_buffer_idx_packed -= vq->size;
} else {
- vq->last_async_desc_idx_split += n_descs;
+ async->last_desc_idx_split += n_descs;
}
}
- return n_pkts_put;
+ return n_cpl;
}
uint16_t
if (unlikely(!dev))
return 0;
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
+ if (unlikely(!vq->async)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
if (!dev)
return 0;
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
+ if (unlikely(!vq->async)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
struct vhost_virtqueue *vq;
uint32_t nb_tx = 0;
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
rte_spinlock_lock(&vq->access_lock);
- if (unlikely(!vq->enabled || !vq->async_registered))
+ if (unlikely(!vq->enabled || !vq->async))
goto out_access_unlock;
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
+ VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
+ dev->ifname, __func__);
return 0;
}
m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
if (data_len < m->l2_len + m->l3_len)
goto error;
- m->ol_flags |= PKT_TX_IPV4;
+ m->ol_flags |= RTE_MBUF_F_TX_IPV4;
*l4_proto = ipv4_hdr->next_proto_id;
break;
case RTE_ETHER_TYPE_IPV6:
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
m->l2_len);
m->l3_len = sizeof(struct rte_ipv6_hdr);
- m->ol_flags |= PKT_TX_IPV6;
+ m->ol_flags |= RTE_MBUF_F_TX_IPV6;
*l4_proto = ipv6_hdr->proto;
break;
default:
}
static __rte_always_inline void
-vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
+vhost_dequeue_offload_legacy(struct virtio_net *dev, struct virtio_net_hdr *hdr,
+ struct rte_mbuf *m)
{
uint8_t l4_proto = 0;
struct rte_tcp_hdr *tcp_hdr = NULL;
case (offsetof(struct rte_tcp_hdr, cksum)):
if (l4_proto != IPPROTO_TCP)
goto error;
- m->ol_flags |= PKT_TX_TCP_CKSUM;
+ m->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
break;
case (offsetof(struct rte_udp_hdr, dgram_cksum)):
if (l4_proto != IPPROTO_UDP)
goto error;
- m->ol_flags |= PKT_TX_UDP_CKSUM;
+ m->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
break;
case (offsetof(struct rte_sctp_hdr, cksum)):
if (l4_proto != IPPROTO_SCTP)
goto error;
- m->ol_flags |= PKT_TX_SCTP_CKSUM;
+ m->ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
break;
default:
goto error;
tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
if (data_len < m->l2_len + m->l3_len + tcp_len)
goto error;
- m->ol_flags |= PKT_TX_TCP_SEG;
+ m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
m->tso_segsz = hdr->gso_size;
m->l4_len = tcp_len;
break;
case VIRTIO_NET_HDR_GSO_UDP:
if (l4_proto != IPPROTO_UDP)
goto error;
- m->ol_flags |= PKT_TX_UDP_SEG;
+ m->ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
m->tso_segsz = hdr->gso_size;
m->l4_len = sizeof(struct rte_udp_hdr);
break;
default:
- VHOST_LOG_DATA(WARNING,
- "unsupported gso type %u.\n", hdr->gso_type);
+ VHOST_LOG_DATA(WARNING, "(%s) unsupported gso type %u.\n",
+ dev->ifname, hdr->gso_type);
goto error;
}
}
}
static __rte_always_inline void
-vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m,
- bool legacy_ol_flags)
+vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr,
+ struct rte_mbuf *m, bool legacy_ol_flags)
{
struct rte_net_hdr_lens hdr_lens;
int l4_supported = 0;
return;
if (legacy_ol_flags) {
- vhost_dequeue_offload_legacy(hdr, m);
+ vhost_dequeue_offload_legacy(dev, hdr, m);
return;
}
- m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
m->packet_type = ptype;
hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
if (hdr->csum_start <= hdrlen && l4_supported != 0) {
- m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
} else {
/* Unknown proto or tunnel, do sw cksum. We can assume
* the cksum field is in the first segment since the
case VIRTIO_NET_HDR_GSO_TCPV6:
if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_TCP)
break;
- m->ol_flags |= PKT_RX_LRO | PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
m->tso_segsz = hdr->gso_size;
break;
case VIRTIO_NET_HDR_GSO_UDP:
if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_UDP)
break;
- m->ol_flags |= PKT_RX_LRO | PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
m->tso_segsz = hdr->gso_size;
break;
default:
if (mbuf_avail == 0) {
cur = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(cur == NULL)) {
- VHOST_LOG_DATA(ERR, "Failed to "
- "allocate memory for mbuf.\n");
+ VHOST_LOG_DATA(ERR, "(%s) failed to allocate memory for mbuf.\n",
+ dev->ifname);
error = -1;
goto out;
}
m->pkt_len += mbuf_offset;
if (hdr)
- vhost_dequeue_offload(hdr, m, legacy_ol_flags);
+ vhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);
out:
}
static int
-virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
+virtio_dev_extbuf_alloc(struct virtio_net *dev, struct rte_mbuf *pkt, uint32_t size)
{
struct rte_mbuf_ext_shared_info *shinfo = NULL;
uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
virtio_dev_extbuf_free, buf);
if (unlikely(shinfo == NULL)) {
rte_free(buf);
- VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
+ VHOST_LOG_DATA(ERR, "(%s) failed to init shinfo\n", dev->ifname);
return -1;
}
return 0;
/* attach an external buffer if supported */
- if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+ if (dev->extbuf && !virtio_dev_extbuf_alloc(dev, pkt, data_len))
return 0;
/* check if chained buffers are allowed */
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
count = RTE_MIN(count, MAX_PKT_BURST);
count = RTE_MIN(count, free_entries);
- VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
- dev->vid, count);
+ VHOST_LOG_DATA(DEBUG, "(%s) about to dequeue %u buffers\n",
+ dev->ifname, count);
if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
return 0;
* is required. Drop this packet.
*/
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "Failed mbuf alloc of size %d from %s on %s.\n",
- buf_len, mbuf_pool->name, dev->ifname);
+ VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
+ dev->ifname, buf_len, mbuf_pool->name);
allocerr_warned = true;
}
dropped += 1;
mbuf_pool, legacy_ol_flags);
if (unlikely(err)) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "Failed to copy desc to mbuf on %s.\n",
+ VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
dev->ifname);
allocerr_warned = true;
}
if (virtio_net_with_host_offload(dev)) {
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
- vhost_dequeue_offload(hdr, pkts[i], legacy_ol_flags);
+ vhost_dequeue_offload(dev, hdr, pkts[i], legacy_ol_flags);
}
}
if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "Failed mbuf alloc of size %d from %s on %s.\n",
- buf_len, mbuf_pool->name, dev->ifname);
+ VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
+ dev->ifname, buf_len, mbuf_pool->name);
allocerr_warned = true;
}
return -1;
mbuf_pool, legacy_ol_flags);
if (unlikely(err)) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "Failed to copy desc to mbuf on %s.\n",
+ VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
dev->ifname);
allocerr_warned = true;
}
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
+ VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
+ dev->ifname, __func__);
return 0;
}
if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
if (rarp_mbuf == NULL) {
- VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
+ VHOST_LOG_DATA(ERR, "(%s) failed to make RARP packet.\n", dev->ifname);
count = 0;
goto out;
}
+ /*
+ * Inject it to the head of "pkts" array, so that switch's mac
+ * learning table will get updated first.
+ */
+ pkts[0] = rarp_mbuf;
+ pkts++;
count -= 1;
}
out_access_unlock:
rte_spinlock_unlock(&vq->access_lock);
- if (unlikely(rarp_mbuf != NULL)) {
- /*
- * Inject it to the head of "pkts" array, so that switch's mac
- * learning table will get updated first.
- */
- memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
- pkts[0] = rarp_mbuf;
+ if (unlikely(rarp_mbuf != NULL))
count += 1;
- }
return count;
}