return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}
+/*
+ * This function must be called with the virtqueue's access_lock taken.
+ */
+static inline void
+vhost_queue_stats_update(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtqueue_stats *stats = &vq->stats;
+ int i;
+
+ if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
+ return;
+
+ for (i = 0; i < count; i++) {
+ struct rte_ether_addr *ea;
+ struct rte_mbuf *pkt = pkts[i];
+ uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt);
+
+ stats->packets++;
+ stats->bytes += pkt_len;
+
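+ /*
+ * Packet size histogram: bin 0 is <64B, bin 1 is exactly 64B,
+ * bins 2-5 cover 65-127, 128-255, 256-511 and 512-1023 bytes,
+ * bin 6 is 1024-1518 bytes and bin 7 is 1519 bytes and above.
+ */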
+ if (pkt_len == 64) {
+ stats->size_bins[1]++;
+ } else if (pkt_len > 64 && pkt_len < 1024) {
+ uint32_t bin;
+
+ /* use the leading-zero count to index the power-of-two size bin */
+ bin = (sizeof(pkt_len) * 8) - __builtin_clz(pkt_len) - 5;
+ stats->size_bins[bin]++;
+ } else {
+ if (pkt_len < 64)
+ stats->size_bins[0]++;
+ else if (pkt_len < 1519)
+ stats->size_bins[6]++;
+ else
+ stats->size_bins[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(pkt, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
+ stats->broadcast++;
+ else
+ stats->multicast++;
+ }
+ }
+}
+
static __rte_always_inline int64_t
vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
int16_t dma_id, uint16_t vchan_id, uint16_t flag_idx,
}
static __rte_always_inline int
-async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
+async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *m, uint32_t mbuf_offset,
- uint64_t buf_iova, uint32_t cpy_len)
+ uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
{
struct vhost_async *async = vq->async;
uint64_t mapped_len;
uint32_t buf_offset = 0;
+ void *src, *dst;
void *host_iova;
while (cpy_len) {
return -1;
}
- if (unlikely(async_iter_add_iovec(dev, async,
- (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
- mbuf_offset),
- host_iova, (size_t)mapped_len)))
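+ /*
+ * to_desc selects the copy direction: from the mbuf into the
+ * descriptor buffer for enqueue, and the reverse for dequeue.
+ */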
+ if (to_desc) {
+ src = (void *)(uintptr_t)rte_pktmbuf_iova_offset(m, mbuf_offset);
+ dst = host_iova;
+ } else {
+ src = host_iova;
+ dst = (void *)(uintptr_t)rte_pktmbuf_iova_offset(m, mbuf_offset);
+ }
+
+ if (unlikely(async_iter_add_iovec(dev, async, src, dst, (size_t)mapped_len)))
return -1;
cpy_len -= (uint32_t)mapped_len;
}
static __rte_always_inline void
-sync_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
+sync_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *m, uint32_t mbuf_offset,
- uint64_t buf_addr, uint64_t buf_iova, uint32_t cpy_len)
+ uint64_t buf_addr, uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
{
struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
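+ /*
+ * Copies larger than MAX_BATCH_LEN, or any copy once the batch array
+ * is full, are performed immediately; smaller ones are queued in the
+ * batch_copy array to be copied later.
+ */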
if (likely(cpy_len > MAX_BATCH_LEN || vq->batch_copy_nb_elems >= vq->size)) {
- rte_memcpy((void *)((uintptr_t)(buf_addr)),
+ if (to_desc) {
+ rte_memcpy((void *)((uintptr_t)(buf_addr)),
rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
cpy_len);
+ } else {
+ rte_memcpy(rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+ (void *)((uintptr_t)(buf_addr)),
+ cpy_len);
+ }
vhost_log_cache_write_iova(dev, vq, buf_iova, cpy_len);
PRINT_PACKET(dev, (uintptr_t)(buf_addr), cpy_len, 0);
} else {
- batch_copy[vq->batch_copy_nb_elems].dst =
- (void *)((uintptr_t)(buf_addr));
- batch_copy[vq->batch_copy_nb_elems].src =
- rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
+ if (to_desc) {
+ batch_copy[vq->batch_copy_nb_elems].dst =
+ (void *)((uintptr_t)(buf_addr));
+ batch_copy[vq->batch_copy_nb_elems].src =
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
+ } else {
+ batch_copy[vq->batch_copy_nb_elems].dst =
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
+ batch_copy[vq->batch_copy_nb_elems].src =
+ (void *)((uintptr_t)(buf_addr));
+ }
batch_copy[vq->batch_copy_nb_elems].log_addr = buf_iova;
batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
vq->batch_copy_nb_elems++;
cpy_len = RTE_MIN(buf_avail, mbuf_avail);
if (is_async) {
- if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
- buf_iova + buf_offset, cpy_len) < 0)
+ if (async_fill_seg(dev, vq, m, mbuf_offset,
+ buf_iova + buf_offset, cpy_len, true) < 0)
goto error;
} else {
- sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
- buf_addr + buf_offset,
- buf_iova + buf_offset, cpy_len);
+ sync_fill_seg(dev, vq, m, mbuf_offset,
+ buf_addr + buf_offset,
+ buf_iova + buf_offset, cpy_len, true);
}
mbuf_avail -= cpy_len;
else
nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
+ vhost_queue_stats_update(dev, vq, pkts, nb_tx);
+
out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_unlock(vq);
n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
+ vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
+ vq->stats.inflight_completed += n_pkts_cpl;
+
out:
rte_spinlock_unlock(&vq->access_lock);
vq = dev->virtqueue[queue_id];
+ if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
+ VHOST_LOG_DATA(ERR, "(%s) %s() called without access lock taken.\n",
+ dev->ifname, __func__);
+ return -1;
+ }
+
if (unlikely(!vq->async)) {
VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
dev->ifname, __func__, queue_id);
n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
+ vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
+ vq->stats.inflight_completed += n_pkts_cpl;
+
return n_pkts_cpl;
}
nb_tx = virtio_dev_rx_async_submit_split(dev, vq, queue_id,
pkts, count, dma_id, vchan_id);
+ vq->stats.inflight_submitted += nb_tx;
+
out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_unlock(vq);
}
static __rte_always_inline int
-copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct buf_vector *buf_vec, uint16_t nr_vec,
struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
- bool legacy_ol_flags)
+ bool legacy_ol_flags, uint16_t slot_idx, bool is_async)
{
- uint32_t buf_avail, buf_offset;
- uint64_t buf_addr, buf_len;
+ uint32_t buf_avail, buf_offset, buf_len;
+ uint64_t buf_addr, buf_iova;
uint32_t mbuf_avail, mbuf_offset;
uint32_t cpy_len;
struct rte_mbuf *cur = m, *prev = m;
struct virtio_net_hdr *hdr = NULL;
/* A counter to avoid desc dead loop chain */
uint16_t vec_idx = 0;
- struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
- int error = 0;
+ struct vhost_async *async = vq->async;
+ struct async_inflight_info *pkts_info;
buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
- if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
- error = -1;
- goto out;
- }
+ if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1))
+ return -1;
if (virtio_net_with_host_offload(dev)) {
if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
buf_offset = dev->vhost_hlen - buf_len;
vec_idx++;
buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
buf_avail = buf_len - buf_offset;
} else if (buf_len == dev->vhost_hlen) {
if (unlikely(++vec_idx >= nr_vec))
- goto out;
+ goto error;
buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
buf_offset = 0;
mbuf_offset = 0;
mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
+
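+ /*
+ * For the async path, prepare the iovec iterator that collects the
+ * source and destination segments handed off to the DMA engine.
+ */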
+ if (is_async) {
+ pkts_info = async->pkts_info;
+ if (async_iter_initialize(dev, async))
+ return -1;
+ }
+
while (1) {
cpy_len = RTE_MIN(buf_avail, mbuf_avail);
- if (likely(cpy_len > MAX_BATCH_LEN ||
- vq->batch_copy_nb_elems >= vq->size ||
- (hdr && cur == m))) {
- rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
- mbuf_offset),
- (void *)((uintptr_t)(buf_addr +
- buf_offset)), cpy_len);
+ if (is_async) {
+ if (async_fill_seg(dev, vq, cur, mbuf_offset,
+ buf_iova + buf_offset, cpy_len, false) < 0)
+ goto error;
} else {
- batch_copy[vq->batch_copy_nb_elems].dst =
- rte_pktmbuf_mtod_offset(cur, void *,
- mbuf_offset);
- batch_copy[vq->batch_copy_nb_elems].src =
- (void *)((uintptr_t)(buf_addr + buf_offset));
- batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
- vq->batch_copy_nb_elems++;
+ sync_fill_seg(dev, vq, cur, mbuf_offset,
+ buf_addr + buf_offset,
+ buf_iova + buf_offset, cpy_len, false);
}
mbuf_avail -= cpy_len;
break;
buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
buf_offset = 0;
if (unlikely(cur == NULL)) {
VHOST_LOG_DATA(ERR, "(%s) failed to allocate memory for mbuf.\n",
dev->ifname);
- error = -1;
- goto out;
+ goto error;
}
prev->next = cur;
prev->data_len = mbuf_offset;
m->pkt_len += mbuf_offset;
- if (hdr)
- vhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);
+ if (is_async) {
+ async_iter_finalize(async);
+ if (hdr)
+ pkts_info[slot_idx].nethdr = *hdr;
+ } else {
+ if (hdr)
+ vhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);
+ }
-out:
+ return 0;
+error:
+ if (is_async)
+ async_iter_cancel(async);
- return error;
+ return -1;
}
static void
break;
}
- err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
- mbuf_pool, legacy_ol_flags);
+ err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
+ mbuf_pool, legacy_ol_flags, 0, false);
if (unlikely(err)) {
if (!allocerr_warned) {
VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
i++;
break;
}
+
}
if (dropped)
return -1;
}
- err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
- mbuf_pool, legacy_ol_flags);
+ err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
+ mbuf_pool, legacy_ol_flags, 0, false);
if (unlikely(err)) {
if (!allocerr_warned) {
VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
* learning table will get updated first.
*/
pkts[0] = rarp_mbuf;
+ vhost_queue_stats_update(dev, vq, pkts, 1);
pkts++;
count -= 1;
}
count = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
}
+ vhost_queue_stats_update(dev, vq, pkts, count);
+
+out:
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_unlock(vq);
+
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ if (unlikely(rarp_mbuf != NULL))
+ count += 1;
+
+ return count;
+}
+
+static __rte_always_inline uint16_t
+async_poll_dequeue_completed_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
+ uint16_t vchan_id, bool legacy_ol_flags)
+{
+ uint16_t start_idx, from, i;
+ uint16_t nr_cpl_pkts = 0;
+ struct async_inflight_info *pkts_info = vq->async->pkts_info;
+
+ vhost_async_dma_check_completed(dev, dma_id, vchan_id, VHOST_DMA_MAX_COPY_COMPLETE);
+
+ start_idx = async_get_first_inflight_pkt_idx(vq);
+
+ from = start_idx;
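+ /*
+ * Walk the in-flight ring in order, stopping at the first packet whose
+ * DMA copies have not yet completed or once 'count' packets are found.
+ */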
+ while (vq->async->pkts_cmpl_flag[from] && count--) {
+ vq->async->pkts_cmpl_flag[from] = false;
+ from = (from + 1) & (vq->size - 1);
+ nr_cpl_pkts++;
+ }
+
+ if (nr_cpl_pkts == 0)
+ return 0;
+
+ for (i = 0; i < nr_cpl_pkts; i++) {
+ from = (start_idx + i) & (vq->size - 1);
+ pkts[i] = pkts_info[from].mbuf;
+
+ if (virtio_net_with_host_offload(dev))
+ vhost_dequeue_offload(dev, &pkts_info[from].nethdr, pkts[i],
+ legacy_ol_flags);
+ }
+
+ /* write back completed descs to used ring and update used idx */
+ write_back_completed_descs_split(vq, nr_cpl_pkts);
+ __atomic_add_fetch(&vq->used->idx, nr_cpl_pkts, __ATOMIC_RELEASE);
+ vhost_vring_call_split(dev, vq);
+
+ vq->async->pkts_inflight_n -= nr_cpl_pkts;
+
+ return nr_cpl_pkts;
+}
+
+static __rte_always_inline uint16_t
+virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
+ int16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
+{
+ static bool allocerr_warned;
+ bool dropped = false;
+ uint16_t free_entries;
+ uint16_t pkt_idx, slot_idx = 0;
+ uint16_t nr_done_pkts = 0;
+ uint16_t pkt_err = 0;
+ uint16_t n_xfer;
+ struct vhost_async *async = vq->async;
+ struct async_inflight_info *pkts_info = async->pkts_info;
+ struct rte_mbuf *pkts_prealloc[MAX_PKT_BURST];
+ uint16_t pkts_size = count;
+
+ /**
+ * The ordering between avail index and
+ * desc reads needs to be enforced.
+ */
+ free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
+ vq->last_avail_idx;
+ if (free_entries == 0)
+ goto out;
+
+ rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+
+ async_iter_reset(async);
+
+ count = RTE_MIN(count, MAX_PKT_BURST);
+ count = RTE_MIN(count, free_entries);
+ VHOST_LOG_DATA(DEBUG, "(%s) about to dequeue %u buffers\n",
+ dev->ifname, count);
+
+ if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count))
+ goto out;
+
+ for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+ uint16_t head_idx = 0;
+ uint16_t nr_vec = 0;
+ uint16_t to;
+ uint32_t buf_len;
+ int err;
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ struct rte_mbuf *pkt = pkts_prealloc[pkt_idx];
+
+ if (unlikely(fill_vec_buf_split(dev, vq, vq->last_avail_idx,
+ &nr_vec, buf_vec,
+ &head_idx, &buf_len,
+ VHOST_ACCESS_RO) < 0)) {
+ dropped = true;
+ break;
+ }
+
+ err = virtio_dev_pktmbuf_prep(dev, pkt, buf_len);
+ if (unlikely(err)) {
+ /**
+ * mbuf allocation fails for jumbo packets when external
+ * buffer allocation is not allowed and a linear buffer
+ * is required. Drop this packet.
+ */
+ if (!allocerr_warned) {
+ VHOST_LOG_DATA(ERR,
+ "(%s) %s: Failed mbuf alloc of size %d from %s\n",
+ dev->ifname, __func__, buf_len, mbuf_pool->name);
+ allocerr_warned = true;
+ }
+ dropped = true;
+ break;
+ }
+
+ slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
+ err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkt, mbuf_pool,
+ legacy_ol_flags, slot_idx, true);
+ if (unlikely(err)) {
+ if (!allocerr_warned) {
+ VHOST_LOG_DATA(ERR,
+ "(%s) %s: Failed to offload copies to async channel.\n",
+ dev->ifname, __func__);
+ allocerr_warned = true;
+ }
+ dropped = true;
+ break;
+ }
+
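+ /* store the mbuf so it can be returned once its DMA copies complete */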
+ pkts_info[slot_idx].mbuf = pkt;
+
+ /* store used descs */
+ to = async->desc_idx_split & (vq->size - 1);
+ async->descs_split[to].id = head_idx;
+ async->descs_split[to].len = 0;
+ async->desc_idx_split++;
+
+ vq->last_avail_idx++;
+ }
+
+ if (unlikely(dropped))
+ rte_pktmbuf_free_bulk(&pkts_prealloc[pkt_idx], count - pkt_idx);
+
+ n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
+ async->iov_iter, pkt_idx);
+
+ async->pkts_inflight_n += n_xfer;
+
+ pkt_err = pkt_idx - n_xfer;
+ if (unlikely(pkt_err)) {
+ VHOST_LOG_DATA(DEBUG, "(%s) %s: failed to transfer data.\n",
+ dev->ifname, __func__);
+
+ pkt_idx = n_xfer;
+ /* recover available ring */
+ vq->last_avail_idx -= pkt_err;
+
+ /**
+ * recover async channel copy-related structures and free pktmbufs
+ * for the failed packets.
+ */
+ async->desc_idx_split -= pkt_err;
+ while (pkt_err-- > 0) {
+ rte_pktmbuf_free(pkts_info[slot_idx & (vq->size - 1)].mbuf);
+ slot_idx--;
+ }
+ }
+
+ async->pkts_idx += pkt_idx;
+ if (async->pkts_idx >= vq->size)
+ async->pkts_idx -= vq->size;
+
+out:
+ /* The DMA device may serve other queues, so unconditionally check for completed copies. */
+ nr_done_pkts = async_poll_dequeue_completed_split(dev, vq, pkts, pkts_size,
+ dma_id, vchan_id, legacy_ol_flags);
+
+ return nr_done_pkts;
+}
+
+__rte_noinline
+static uint16_t
+virtio_dev_tx_async_split_legacy(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count,
+ int16_t dma_id, uint16_t vchan_id)
+{
+ return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
+ pkts, count, dma_id, vchan_id, true);
+}
+
+__rte_noinline
+static uint16_t
+virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count,
+ int16_t dma_id, uint16_t vchan_id)
+{
+ return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
+ pkts, count, dma_id, vchan_id, false);
+}
+
+uint16_t
+rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
+ int *nr_inflight, int16_t dma_id, uint16_t vchan_id)
+{
+ struct virtio_net *dev;
+ struct rte_mbuf *rarp_mbuf = NULL;
+ struct vhost_virtqueue *vq;
+ int16_t success = 1;
+
+ dev = get_device(vid);
+ if (!dev || !nr_inflight)
+ return 0;
+
+ *nr_inflight = -1;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
+ dev->ifname, __func__);
+ return 0;
+ }
+
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
+ return 0;
+ }
+
+ if (unlikely(dma_id < 0 || dma_id >= RTE_DMADEV_DEFAULT_MAX)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid dma id %d.\n",
+ dev->ifname, __func__, dma_id);
+ return 0;
+ }
+
+ if (unlikely(!dma_copy_track[dma_id].vchans ||
+ !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
+ dma_id, vchan_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
+ return 0;
+
+ if (unlikely(vq->enabled == 0)) {
+ count = 0;
+ goto out_access_unlock;
+ }
+
+ if (unlikely(!vq->async)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
+ count = 0;
+ goto out_access_unlock;
+ }
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0))
+ if (unlikely(vring_translate(dev, vq) < 0)) {
+ count = 0;
+ goto out;
+ }
+
+ /*
+ * Construct a RARP broadcast packet and inject it into the "pkts"
+ * array, so that it looks like the guest actually sent such a packet.
+ *
+ * Check user_send_rarp() for more information.
+ *
+ * broadcast_rarp shares a cacheline in the virtio_net structure
+ * with some fields that are accessed during enqueue, and
+ * __atomic_compare_exchange_n causes a write if it performs the
+ * compare and exchange. This could result in false sharing between
+ * enqueue and dequeue.
+ *
+ * Prevent unnecessary false sharing by reading broadcast_rarp first
+ * and only performing compare and exchange if the read indicates it
+ * is likely to be set.
+ */
+ if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
+ __atomic_compare_exchange_n(&dev->broadcast_rarp,
+ &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
+
+ rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
+ if (rarp_mbuf == NULL) {
+ VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
+ count = 0;
+ goto out;
+ }
+ /*
+ * Inject it at the head of the "pkts" array, so that the switch's MAC
+ * learning table gets updated first.
+ */
+ pkts[0] = rarp_mbuf;
+ pkts++;
+ count -= 1;
+ }
+
+ if (unlikely(vq_is_packed(dev))) {
+ static bool not_support_pack_log;
+ if (!not_support_pack_log) {
+ VHOST_LOG_DATA(ERR,
+ "(%s) %s: async dequeue does not support packed ring.\n",
+ dev->ifname, __func__);
+ not_support_pack_log = true;
+ }
+ count = 0;
+ goto out;
+ }
+
+ if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
+ count = virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool, pkts,
+ count, dma_id, vchan_id);
+ else
+ count = virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool, pkts,
+ count, dma_id, vchan_id);
+
+ *nr_inflight = vq->async->pkts_inflight_n;
+
out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_unlock(vq);