/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <linux/virtio_net.h>

#include <rte_memcpy.h>
#include <rte_ether.h>
#include <rte_dmadev.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
#include <rte_vhost_async.h>

#define MAX_BATCH_LEN 256
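
/*
 * Copies no longer than MAX_BATCH_LEN bytes are staged in the per-virtqueue
 * batch_copy_elems array and flushed in one pass (see do_data_copy_enqueue()
 * and do_data_copy_dequeue() below); longer copies go through rte_memcpy()
 * immediately.
 */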

/* DMA device copy operation tracking array. */
struct async_dma_info dma_copy_track[RTE_DMADEV_DEFAULT_MAX];
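
/*
 * Illustrative setup sketch (an assumption about typical application usage,
 * not code from this file): dma_copy_track[] is populated when the
 * application configures a DMA vChannel for vhost and registers a virtqueue
 * for async operation. VIRTIO_RXQ is a hypothetical application-side queue
 * index; dma_id/vchan_id identify an already-configured dmadev.
 *
 *	if (rte_vhost_async_dma_configure(dma_id, vchan_id) < 0)
 *		rte_exit(EXIT_FAILURE, "failed to configure DMA %d:%u\n",
 *				dma_id, vchan_id);
 *	if (rte_vhost_async_channel_register(vid, VIRTIO_RXQ) < 0)
 *		rte_exit(EXIT_FAILURE, "failed to enable async for vq\n");
 */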

static __rte_always_inline bool
rxvq_is_mergeable(struct virtio_net *dev)
{
	return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}

static __rte_always_inline bool
virtio_net_is_inorder(struct virtio_net *dev)
{
	return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
}

static inline bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
	return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}

/*
 * This function must be called with the virtqueue's access_lock taken.
 */
static inline void
vhost_queue_stats_update(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct rte_mbuf **pkts, uint16_t count)
{
	struct virtqueue_stats *stats = &vq->stats;
	uint16_t i;

	if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
		return;

	for (i = 0; i < count; i++) {
		struct rte_ether_addr *ea;
		struct rte_mbuf *pkt = pkts[i];
		uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt);

		stats->packets++;
		stats->bytes += pkt_len;

		if (pkt_len == 64) {
			stats->size_bins[1]++;
		} else if (pkt_len > 64 && pkt_len < 1024) {
			uint32_t bin;

			/* count leading zeros to find the power-of-two bin */
			bin = (sizeof(pkt_len) * 8) - __builtin_clz(pkt_len) - 5;
			stats->size_bins[bin]++;
		} else {
			if (pkt_len < 64)
				stats->size_bins[0]++;
			else if (pkt_len < 1519)
				stats->size_bins[6]++;
			else
				stats->size_bins[7]++;
		}
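
		/*
		 * Resulting histogram layout: size_bins[0] < 64B,
		 * size_bins[1] == 64B, size_bins[2..5] cover 65-1023B in
		 * power-of-two steps, size_bins[6] 1024-1518B and
		 * size_bins[7] >= 1519B. E.g. pkt_len = 128 gives
		 * 32 - clz(128) - 5 = 3, so it lands in size_bins[3]
		 * (128-255B).
		 */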

		ea = rte_pktmbuf_mtod(pkt, struct rte_ether_addr *);
		if (rte_is_multicast_ether_addr(ea)) {
			if (rte_is_broadcast_ether_addr(ea))
				stats->broadcast++;
			else
				stats->multicast++;
		}
	}
}

static __rte_always_inline int64_t
vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
		int16_t dma_id, uint16_t vchan_id, uint16_t flag_idx,
		struct vhost_iov_iter *pkt)
{
	struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
	uint16_t ring_mask = dma_info->ring_mask;
	static bool vhost_async_dma_copy_log;

	struct vhost_iovec *iov = pkt->iov;
	int copy_idx = 0;
	uint32_t nr_segs = pkt->nr_segs;
	uint16_t i;

	if (rte_dma_burst_capacity(dma_id, vchan_id) < nr_segs)
		return -1;

	for (i = 0; i < nr_segs; i++) {
		copy_idx = rte_dma_copy(dma_id, vchan_id, (rte_iova_t)iov[i].src_addr,
				(rte_iova_t)iov[i].dst_addr, iov[i].len, RTE_DMA_OP_FLAG_LLC);
		/*
		 * Since all memory is pinned and the DMA vChannel ring has
		 * enough space, failure should be rare. If it does happen,
		 * the DMA device has encountered a serious error; in that
		 * case, stop the async data-path and check what has happened
		 * to the DMA device.
		 */
		if (unlikely(copy_idx < 0)) {
			if (!vhost_async_dma_copy_log) {
				VHOST_LOG_DATA(ERR, "(%s) DMA copy failed for channel %d:%u\n",
						dev->ifname, dma_id, vchan_id);
				vhost_async_dma_copy_log = true;
			}
			return -1;
		}
	}

	/*
	 * Only store the packet completion flag address in the last copy's
	 * slot; all other slots are set to NULL.
	 */
	dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask] = &vq->async->pkts_cmpl_flag[flag_idx];

	return nr_segs;
}
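
/*
 * Illustrative example (hypothetical indices): a 3-segment packet whose
 * copies land in DMA ring slots 10..12 leaves pkts_cmpl_flag_addr[10] and
 * [11] NULL and points [12] at pkts_cmpl_flag[flag_idx], so completion of
 * the final segment marks the whole packet as done.
 */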

static __rte_always_inline uint16_t
vhost_async_dma_transfer(struct virtio_net *dev, struct vhost_virtqueue *vq,
		int16_t dma_id, uint16_t vchan_id, uint16_t head_idx,
		struct vhost_iov_iter *pkts, uint16_t nr_pkts)
{
	struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
	int64_t ret, nr_copies = 0;
	uint16_t pkt_idx;

	rte_spinlock_lock(&dma_info->dma_lock);

	for (pkt_idx = 0; pkt_idx < nr_pkts; pkt_idx++) {
		ret = vhost_async_dma_transfer_one(dev, vq, dma_id, vchan_id, head_idx,
				&pkts[pkt_idx]);
		if (unlikely(ret < 0))
			break;

		nr_copies += ret;
		head_idx++;
		if (head_idx >= vq->size)
			head_idx -= vq->size;
	}

	if (likely(nr_copies > 0))
		rte_dma_submit(dma_id, vchan_id);

	rte_spinlock_unlock(&dma_info->dma_lock);

	return pkt_idx;
}

static __rte_always_inline uint16_t
vhost_async_dma_check_completed(struct virtio_net *dev, int16_t dma_id, uint16_t vchan_id,
		uint16_t max_pkts)
{
	struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
	uint16_t ring_mask = dma_info->ring_mask;
	uint16_t last_idx = 0;
	uint16_t nr_copies;
	uint16_t copy_idx;
	uint16_t i;
	bool has_error = false;
	static bool vhost_async_dma_complete_log;

	rte_spinlock_lock(&dma_info->dma_lock);

	/*
	 * Print an error log for debugging if the DMA device reports an
	 * error during transfer; errors are not handled at the vhost level.
	 */
	nr_copies = rte_dma_completed(dma_id, vchan_id, max_pkts, &last_idx, &has_error);
	if (unlikely(!vhost_async_dma_complete_log && has_error)) {
		VHOST_LOG_DATA(ERR, "(%s) DMA completion failure on channel %d:%u\n", dev->ifname,
				dma_id, vchan_id);
		vhost_async_dma_complete_log = true;
	} else if (nr_copies == 0) {
		goto out;
	}

	copy_idx = last_idx - nr_copies + 1;
	for (i = 0; i < nr_copies; i++) {
		bool *flag;

		flag = dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask];
		if (flag) {
			/*
			 * Mark the packet flag as received. The flag could
			 * belong to another virtqueue, but the write is
			 * atomic.
			 */
			*flag = true;
			dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask] = NULL;
		}
		copy_idx++;
	}

out:
	rte_spinlock_unlock(&dma_info->dma_lock);
	return nr_copies;
}

static __rte_always_inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++) {
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
		vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
					   elem[i].len);
		PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
	}

	vq->batch_copy_nb_elems = 0;
}

static __rte_always_inline void
do_data_copy_dequeue(struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++)
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);

	vq->batch_copy_nb_elems = 0;
}

static __rte_always_inline void
do_flush_shadow_used_ring_split(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			uint16_t to, uint16_t from, uint16_t size)
{
	rte_memcpy(&vq->used->ring[to],
			&vq->shadow_used_split[from],
			size * sizeof(struct vring_used_elem));
	vhost_log_cache_used_vring(dev, vq,
			offsetof(struct vring_used, ring[to]),
			size * sizeof(struct vring_used_elem));
}

static __rte_always_inline void
flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

	if (used_idx + vq->shadow_used_idx <= vq->size) {
		do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
					  vq->shadow_used_idx);
	} else {
		uint16_t size;

		/* update used ring interval [used_idx, vq->size] */
		size = vq->size - used_idx;
		do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);

		/* update the left half used ring interval [0, left_size] */
		do_flush_shadow_used_ring_split(dev, vq, 0, size,
					  vq->shadow_used_idx - size);
	}
	vq->last_used_idx += vq->shadow_used_idx;

	vhost_log_cache_sync(dev, vq);

	__atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
			   __ATOMIC_RELEASE);
	vq->shadow_used_idx = 0;
	vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
		sizeof(vq->used->idx));
}

static __rte_always_inline void
update_shadow_used_ring_split(struct vhost_virtqueue *vq,
			 uint16_t desc_idx, uint32_t len)
{
	uint16_t i = vq->shadow_used_idx++;

	vq->shadow_used_split[i].id  = desc_idx;
	vq->shadow_used_split[i].len = len;
}

static __rte_always_inline void
vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
				  struct vhost_virtqueue *vq)
{
	int i;
	uint16_t used_idx = vq->last_used_idx;
	uint16_t head_idx = vq->last_used_idx;
	uint16_t head_flags = 0;

	/* Split loop in two to save memory barriers */
	for (i = 0; i < vq->shadow_used_idx; i++) {
		vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
		vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;

		used_idx += vq->shadow_used_packed[i].count;
		if (used_idx >= vq->size)
			used_idx -= vq->size;
	}

	/* The ordering for storing desc flags needs to be enforced. */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	for (i = 0; i < vq->shadow_used_idx; i++) {
		uint16_t flags;

		if (vq->shadow_used_packed[i].len)
			flags = VRING_DESC_F_WRITE;
		else
			flags = 0;

		if (vq->used_wrap_counter) {
			flags |= VRING_DESC_F_USED;
			flags |= VRING_DESC_F_AVAIL;
		} else {
			flags &= ~VRING_DESC_F_USED;
			flags &= ~VRING_DESC_F_AVAIL;
		}

		if (i > 0) {
			vq->desc_packed[vq->last_used_idx].flags = flags;

			vhost_log_cache_used_vring(dev, vq,
					vq->last_used_idx *
					sizeof(struct vring_packed_desc),
					sizeof(struct vring_packed_desc));
		} else {
			head_idx = vq->last_used_idx;
			head_flags = flags;
		}

		vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
	}

	vq->desc_packed[head_idx].flags = head_flags;

	vhost_log_cache_used_vring(dev, vq,
				head_idx *
				sizeof(struct vring_packed_desc),
				sizeof(struct vring_packed_desc));

	vq->shadow_used_idx = 0;
	vhost_log_cache_sync(dev, vq);
}
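
/*
 * Note on the packed-ring flag encoding used above (virtio 1.1 spec, sec.
 * 2.7.1): the device marks a descriptor as used by writing
 * VRING_DESC_F_AVAIL and VRING_DESC_F_USED so that both match its used wrap
 * counter. The head descriptor's flags are written last to publish the whole
 * batch to the driver in one step.
 */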

static __rte_always_inline void
vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
				  struct vhost_virtqueue *vq)
{
	struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];

	vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
	/* desc flags are the synchronization point for virtio packed vring */
	__atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
			 used_elem->flags, __ATOMIC_RELEASE);

	vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
				   sizeof(struct vring_packed_desc),
				   sizeof(struct vring_packed_desc));
	vq->shadow_used_idx = 0;
	vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
				 struct vhost_virtqueue *vq,
				 uint64_t *lens,
				 uint16_t *ids)
{
	uint16_t i;
	uint16_t flags;
	uint16_t last_used_idx;
	struct vring_packed_desc *desc_base;

	last_used_idx = vq->last_used_idx;
	desc_base = &vq->desc_packed[last_used_idx];

	flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		desc_base[i].id = ids[i];
		desc_base[i].len = lens[i];
	}

	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		desc_base[i].flags = flags;
	}

	vhost_log_cache_used_vring(dev, vq, last_used_idx *
				   sizeof(struct vring_packed_desc),
				   sizeof(struct vring_packed_desc) *
				   PACKED_BATCH_SIZE);
	vhost_log_cache_sync(dev, vq);

	vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
					  uint16_t id)
{
	vq->shadow_used_packed[0].id = id;

	if (!vq->shadow_used_idx) {
		vq->shadow_last_used_idx = vq->last_used_idx;
		vq->shadow_used_packed[0].flags =
			PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
		vq->shadow_used_packed[0].len = 0;
		vq->shadow_used_packed[0].count = 1;
		vq->shadow_used_idx++;
	}

	vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
				  struct vhost_virtqueue *vq,
				  uint16_t *ids)
{
	uint16_t flags;
	uint16_t i;
	uint16_t begin;

	flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);

	if (!vq->shadow_used_idx) {
		vq->shadow_last_used_idx = vq->last_used_idx;
		vq->shadow_used_packed[0].id  = ids[0];
		vq->shadow_used_packed[0].len = 0;
		vq->shadow_used_packed[0].count = 1;
		vq->shadow_used_packed[0].flags = flags;
		vq->shadow_used_idx++;
		begin = 1;
	} else
		begin = 0;

	vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
		vq->desc_packed[vq->last_used_idx + i].id = ids[i];
		vq->desc_packed[vq->last_used_idx + i].len = 0;
	}

	rte_atomic_thread_fence(__ATOMIC_RELEASE);
	vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
		vq->desc_packed[vq->last_used_idx + i].flags = flags;

	vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
				   sizeof(struct vring_packed_desc),
				   sizeof(struct vring_packed_desc) *
				   PACKED_BATCH_SIZE);
	vhost_log_cache_sync(dev, vq);

	vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
				   uint16_t buf_id,
				   uint16_t count)
{
	uint16_t flags;

	flags = vq->desc_packed[vq->last_used_idx].flags;
	if (vq->used_wrap_counter) {
		flags |= VRING_DESC_F_USED;
		flags |= VRING_DESC_F_AVAIL;
	} else {
		flags &= ~VRING_DESC_F_USED;
		flags &= ~VRING_DESC_F_AVAIL;
	}

	if (!vq->shadow_used_idx) {
		vq->shadow_last_used_idx = vq->last_used_idx;

		vq->shadow_used_packed[0].id  = buf_id;
		vq->shadow_used_packed[0].len = 0;
		vq->shadow_used_packed[0].flags = flags;
		vq->shadow_used_idx++;
	} else {
		vq->desc_packed[vq->last_used_idx].id = buf_id;
		vq->desc_packed[vq->last_used_idx].len = 0;
		vq->desc_packed[vq->last_used_idx].flags = flags;
	}

	vq_inc_last_used_packed(vq, count);
}

static __rte_always_inline void
vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
					   uint16_t buf_id,
					   uint16_t count)
{
	uint16_t flags;

	vq->shadow_used_packed[0].id = buf_id;

	flags = vq->desc_packed[vq->last_used_idx].flags;
	if (vq->used_wrap_counter) {
		flags |= VRING_DESC_F_USED;
		flags |= VRING_DESC_F_AVAIL;
	} else {
		flags &= ~VRING_DESC_F_USED;
		flags &= ~VRING_DESC_F_AVAIL;
	}

	if (!vq->shadow_used_idx) {
		vq->shadow_last_used_idx = vq->last_used_idx;
		vq->shadow_used_packed[0].len = 0;
		vq->shadow_used_packed[0].flags = flags;
		vq->shadow_used_idx++;
	}

	vq_inc_last_used_packed(vq, count);
}

static __rte_always_inline void
vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
			    uint32_t *len,
			    uint16_t *id,
			    uint16_t *count,
			    uint16_t num_buffers)
{
	uint16_t i;

	for (i = 0; i < num_buffers; i++) {
		/* start the shadow at a batch-aligned index so that the
		 * flush below stays aligned with the batch size
		 */
		if (!vq->shadow_used_idx)
			vq->shadow_aligned_idx = vq->last_used_idx &
				PACKED_BATCH_MASK;
		vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
		vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
		vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
		vq->shadow_aligned_idx += count[i];
		vq->shadow_used_idx++;
	}
}

static __rte_always_inline void
vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
				   struct vhost_virtqueue *vq,
				   uint32_t *len,
				   uint16_t *id,
				   uint16_t *count,
				   uint16_t num_buffers)
{
	vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);

	if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
		do_data_copy_enqueue(dev, vq);
		vhost_flush_enqueue_shadow_packed(dev, vq);
	}
}

/* skip the write when the value is already set, to lessen cache pressure */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	if ((var) != (val))			\
		(var) = (val);			\
} while (0)

static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
	uint64_t csum_l4 = m_buf->ol_flags & RTE_MBUF_F_TX_L4_MASK;

	if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
		csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;

	if (csum_l4) {
		net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;

		switch (csum_l4) {
		case RTE_MBUF_F_TX_TCP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
						cksum));
			break;
		case RTE_MBUF_F_TX_UDP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
						dgram_cksum));
			break;
		case RTE_MBUF_F_TX_SCTP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
						cksum));
			break;
		}
	} else {
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
	}

	/* the virtio-net header cannot carry IP csum offload, so compute it here */
	if (m_buf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr;

		ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
						   m_buf->l2_len);
		ipv4_hdr->hdr_checksum = 0;
		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	}

	if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
		if (m_buf->ol_flags & RTE_MBUF_F_TX_IPV4)
			net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else
			net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		net_hdr->gso_size = m_buf->tso_segsz;
		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
					+ m_buf->l4_len;
	} else if (m_buf->ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
		net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		net_hdr->gso_size = m_buf->tso_segsz;
		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
			m_buf->l4_len;
	} else {
		ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
	}
}
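
/*
 * Illustrative producer-side setup (an assumption about typical application
 * usage, not code from this file): for virtio_enqueue_offload() above to
 * emit a TSO header, the mbuf handed to the enqueue API would carry
 * something like:
 *
 *	m->ol_flags  = RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 *		       RTE_MBUF_F_TX_TCP_SEG;
 *	m->l2_len    = sizeof(struct rte_ether_hdr);
 *	m->l3_len    = sizeof(struct rte_ipv4_hdr);
 *	m->l4_len    = sizeof(struct rte_tcp_hdr);
 *	m->tso_segsz = 1448;
 *
 * where 1448 is the TCP MSS. This maps to gso_type =
 * VIRTIO_NET_HDR_GSO_TCPV4, gso_size = 1448 and hdr_len = l2_len + l3_len +
 * l4_len in the virtio-net header.
 */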

static __rte_always_inline int
map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct buf_vector *buf_vec, uint16_t *vec_idx,
		uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
{
	uint16_t vec_id = *vec_idx;

	while (desc_len) {
		uint64_t desc_addr;
		uint64_t desc_chunck_len = desc_len;

		if (unlikely(vec_id >= BUF_VECTOR_MAX))
			return -1;

		desc_addr = vhost_iova_to_vva(dev, vq,
				desc_iova,
				&desc_chunck_len,
				perm);
		if (unlikely(!desc_addr))
			return -1;

		rte_prefetch0((void *)(uintptr_t)desc_addr);

		buf_vec[vec_id].buf_iova = desc_iova;
		buf_vec[vec_id].buf_addr = desc_addr;
		buf_vec[vec_id].buf_len  = desc_chunck_len;

		desc_len -= desc_chunck_len;
		desc_iova += desc_chunck_len;
		vec_id++;
	}
	*vec_idx = vec_id;

	return 0;
}

static __rte_always_inline int
fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
			 uint32_t avail_idx, uint16_t *vec_idx,
			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
			 uint32_t *desc_chain_len, uint8_t perm)
{
	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
	uint16_t vec_id = *vec_idx;
	uint32_t len = 0;
	uint64_t dlen;
	uint32_t nr_descs = vq->size;
	uint32_t cnt = 0;
	struct vring_desc *descs = vq->desc;
	struct vring_desc *idesc = NULL;

	if (unlikely(idx >= vq->size))
		return -1;

	*desc_chain_head = idx;

	if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
		dlen = vq->desc[idx].len;
		nr_descs = dlen / sizeof(struct vring_desc);
		if (unlikely(nr_descs > vq->size))
			return -1;

		descs = (struct vring_desc *)(uintptr_t)
			vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
						&dlen,
						VHOST_ACCESS_RO);
		if (unlikely(!descs))
			return -1;

		if (unlikely(dlen < vq->desc[idx].len)) {
			/*
			 * The indirect desc table is not contiguous in
			 * process VA space, so we have to copy it.
			 */
			idesc = vhost_alloc_copy_ind_table(dev, vq,
					vq->desc[idx].addr, vq->desc[idx].len);
			if (unlikely(!idesc))
				return -1;

			descs = idesc;
		}

		idx = 0;
	}

	while (1) {
		if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
			free_ind_table(idesc);
			return -1;
		}

		dlen = descs[idx].len;
		len += dlen;

		if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
						descs[idx].addr, dlen,
						perm))) {
			free_ind_table(idesc);
			return -1;
		}

		if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
			break;

		idx = descs[idx].next;
	}

	*desc_chain_len = len;
	*vec_idx = vec_id;

	if (unlikely(!!idesc))
		free_ind_table(idesc);

	return 0;
}

/*
 * Returns -1 on fail, 0 on success
 */
static inline int
reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint32_t size, struct buf_vector *buf_vec,
				uint16_t *num_buffers, uint16_t avail_head,
				uint16_t *nr_vec)
{
	uint16_t cur_idx;
	uint16_t vec_idx = 0;
	uint16_t max_tries, tries = 0;

	uint16_t head_idx = 0;
	uint32_t len = 0;

	*num_buffers = 0;
	cur_idx = vq->last_avail_idx;

	if (rxvq_is_mergeable(dev))
		max_tries = vq->size - 1;
	else
		max_tries = 1;

	while (size > 0) {
		if (unlikely(cur_idx == avail_head))
			return -1;
		/*
		 * if we tried all available ring items, and still
		 * can't get enough buf, it means something abnormal
		 * happened.
		 */
		if (unlikely(++tries > max_tries))
			return -1;

		if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
						&vec_idx, buf_vec,
						&head_idx, &len,
						VHOST_ACCESS_RW) < 0))
			return -1;
		len = RTE_MIN(len, size);
		update_shadow_used_ring_split(vq, head_idx, len);
		size -= len;

		cur_idx++;
		*num_buffers += 1;
	}

	*nr_vec = vec_idx;

	return 0;
}

static __rte_always_inline int
fill_vec_buf_packed_indirect(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			struct vring_packed_desc *desc, uint16_t *vec_idx,
			struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
{
	uint16_t i;
	uint32_t nr_descs;
	uint16_t vec_id = *vec_idx;
	uint64_t dlen;
	struct vring_packed_desc *descs, *idescs = NULL;

	dlen = desc->len;
	descs = (struct vring_packed_desc *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
	if (unlikely(!descs))
		return -1;

	if (unlikely(dlen < desc->len)) {
		/*
		 * The indirect desc table is not contiguous in process VA
		 * space, so we have to copy it.
		 */
		idescs = vhost_alloc_copy_ind_table(dev,
				vq, desc->addr, desc->len);
		if (unlikely(!idescs))
			return -1;

		descs = idescs;
	}

	nr_descs = desc->len / sizeof(struct vring_packed_desc);
	if (unlikely(nr_descs >= vq->size)) {
		free_ind_table(idescs);
		return -1;
	}

	for (i = 0; i < nr_descs; i++) {
		if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
			free_ind_table(idescs);
			return -1;
		}

		dlen = descs[i].len;
		*len += dlen;
		if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
						descs[i].addr, dlen,
						perm)))
			return -1;
	}
	*vec_idx = vec_id;

	if (unlikely(!!idescs))
		free_ind_table(idescs);

	return 0;
}

static __rte_always_inline int
fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint16_t avail_idx, uint16_t *desc_count,
			struct buf_vector *buf_vec, uint16_t *vec_idx,
			uint16_t *buf_id, uint32_t *len, uint8_t perm)
{
	bool wrap_counter = vq->avail_wrap_counter;
	struct vring_packed_desc *descs = vq->desc_packed;
	uint16_t vec_id = *vec_idx;
	uint64_t dlen;

	if (avail_idx < vq->last_avail_idx)
		wrap_counter ^= 1;

	/*
	 * Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
		return -1;

	*desc_count = 0;
	*len = 0;

	while (1) {
		if (unlikely(vec_id >= BUF_VECTOR_MAX))
			return -1;

		if (unlikely(*desc_count >= vq->size))
			return -1;

		*desc_count += 1;
		*buf_id = descs[avail_idx].id;

		if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
			if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
							&descs[avail_idx],
							&vec_id, buf_vec,
							len, perm) < 0))
				return -1;
		} else {
			dlen = descs[avail_idx].len;
			*len += dlen;

			if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
							descs[avail_idx].addr,
							dlen,
							perm)))
				return -1;
		}

		if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
			break;

		if (++avail_idx >= vq->size) {
			avail_idx -= vq->size;
			wrap_counter ^= 1;
		}
	}

	*vec_idx = vec_id;

	return 0;
}

static __rte_noinline void
copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct buf_vector *buf_vec,
		struct virtio_net_hdr_mrg_rxbuf *hdr)
{
	uint64_t len;
	uint64_t remain = dev->vhost_hlen;
	uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
	uint64_t iova = buf_vec->buf_iova;

	while (remain) {
		len = RTE_MIN(remain,
				buf_vec->buf_len);
		dst = buf_vec->buf_addr;
		rte_memcpy((void *)(uintptr_t)dst,
				(void *)(uintptr_t)src,
				len);

		PRINT_PACKET(dev, (uintptr_t)dst,
				(uint32_t)len, 0);
		vhost_log_cache_write_iova(dev, vq,
				iova, len);

		remain -= len;
		iova += len;
		src += len;
		buf_vec++;
	}
}

static __rte_always_inline int
async_iter_initialize(struct virtio_net *dev, struct vhost_async *async)
{
	struct vhost_iov_iter *iter;

	if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
		VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
		return -1;
	}

	iter = async->iov_iter + async->iter_idx;
	iter->iov = async->iovec + async->iovec_idx;
	iter->nr_segs = 0;

	return 0;
}

static __rte_always_inline int
async_iter_add_iovec(struct virtio_net *dev, struct vhost_async *async,
		void *src, void *dst, size_t len)
{
	struct vhost_iov_iter *iter;
	struct vhost_iovec *iovec;

	if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
		static bool vhost_max_async_vec_log;

		if (!vhost_max_async_vec_log) {
			VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
			vhost_max_async_vec_log = true;
		}

		return -1;
	}

	iter = async->iov_iter + async->iter_idx;
	iovec = async->iovec + async->iovec_idx;

	iovec->src_addr = src;
	iovec->dst_addr = dst;
	iovec->len = len;

	iter->nr_segs++;
	async->iovec_idx++;

	return 0;
}

static __rte_always_inline void
async_iter_finalize(struct vhost_async *async)
{
	async->iter_idx++;
}

static __rte_always_inline void
async_iter_cancel(struct vhost_async *async)
{
	struct vhost_iov_iter *iter;

	iter = async->iov_iter + async->iter_idx;
	async->iovec_idx -= iter->nr_segs;
	iter->nr_segs = 0;
	iter->iov = NULL;
}

static __rte_always_inline void
async_iter_reset(struct vhost_async *async)
{
	async->iter_idx = 0;
	async->iovec_idx = 0;
}

static __rte_always_inline int
async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct rte_mbuf *m, uint32_t mbuf_offset,
		uint64_t buf_iova, uint32_t cpy_len)
{
	struct vhost_async *async = vq->async;
	uint64_t mapped_len;
	uint32_t buf_offset = 0;
	void *host_iova;

	while (cpy_len) {
		host_iova = (void *)(uintptr_t)gpa_to_first_hpa(dev,
				buf_iova + buf_offset, cpy_len, &mapped_len);
		if (unlikely(!host_iova)) {
			VHOST_LOG_DATA(ERR, "(%s) %s: failed to get host iova.\n",
					dev->ifname, __func__);
			return -1;
		}

		if (unlikely(async_iter_add_iovec(dev, async,
						(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
							mbuf_offset),
						host_iova, (size_t)mapped_len)))
			return -1;

		cpy_len -= (uint32_t)mapped_len;
		mbuf_offset += (uint32_t)mapped_len;
		buf_offset += (uint32_t)mapped_len;
	}

	return 0;
}

static __rte_always_inline void
sync_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct rte_mbuf *m, uint32_t mbuf_offset,
		uint64_t buf_addr, uint64_t buf_iova, uint32_t cpy_len)
{
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;

	if (likely(cpy_len > MAX_BATCH_LEN || vq->batch_copy_nb_elems >= vq->size)) {
		rte_memcpy((void *)((uintptr_t)(buf_addr)),
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
				cpy_len);
		vhost_log_cache_write_iova(dev, vq, buf_iova, cpy_len);
		PRINT_PACKET(dev, (uintptr_t)(buf_addr), cpy_len, 0);
	} else {
		batch_copy[vq->batch_copy_nb_elems].dst =
			(void *)((uintptr_t)(buf_addr));
		batch_copy[vq->batch_copy_nb_elems].src =
			rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
		batch_copy[vq->batch_copy_nb_elems].log_addr = buf_iova;
		batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
		vq->batch_copy_nb_elems++;
	}
}

static __rte_always_inline int
mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct rte_mbuf *m, struct buf_vector *buf_vec,
		uint16_t nr_vec, uint16_t num_buffers, bool is_async)
{
	uint32_t vec_idx = 0;
	uint32_t mbuf_offset, mbuf_avail;
	uint32_t buf_offset, buf_avail;
	uint64_t buf_addr, buf_iova, buf_len;
	uint32_t cpy_len;
	uint64_t hdr_addr;
	struct rte_mbuf *hdr_mbuf;
	struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
	struct vhost_async *async = vq->async;

	if (unlikely(m == NULL))
		return -1;

	buf_addr = buf_vec[vec_idx].buf_addr;
	buf_iova = buf_vec[vec_idx].buf_iova;
	buf_len = buf_vec[vec_idx].buf_len;

	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1))
		return -1;

	hdr_mbuf = m;
	hdr_addr = buf_addr;
	if (unlikely(buf_len < dev->vhost_hlen)) {
		memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
		hdr = &tmp_hdr;
	} else
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;

	VHOST_LOG_DATA(DEBUG, "(%s) RX: num merge buffers %d\n",
		dev->ifname, num_buffers);

	if (unlikely(buf_len < dev->vhost_hlen)) {
		buf_offset = dev->vhost_hlen - buf_len;
		vec_idx++;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;
		buf_avail = buf_len - buf_offset;
	} else {
		buf_offset = dev->vhost_hlen;
		buf_avail = buf_len - dev->vhost_hlen;
	}

	mbuf_avail = rte_pktmbuf_data_len(m);
	mbuf_offset = 0;

	if (is_async) {
		if (async_iter_initialize(dev, async))
			return -1;
	}

	while (mbuf_avail != 0 || m->next != NULL) {
		/* done with current buf, get the next one */
		if (buf_avail == 0) {
			vec_idx++;
			if (unlikely(vec_idx >= nr_vec))
				goto error;

			buf_addr = buf_vec[vec_idx].buf_addr;
			buf_iova = buf_vec[vec_idx].buf_iova;
			buf_len = buf_vec[vec_idx].buf_len;

			buf_offset = 0;
			buf_avail = buf_len;
		}

		/* done with current mbuf, get the next one */
		if (mbuf_avail == 0) {
			m = m->next;

			mbuf_offset = 0;
			mbuf_avail = rte_pktmbuf_data_len(m);
		}

		if (hdr_addr) {
			virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
			if (rxvq_is_mergeable(dev))
				ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
						num_buffers);

			if (unlikely(hdr == &tmp_hdr)) {
				copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
			} else {
				PRINT_PACKET(dev, (uintptr_t)hdr_addr,
						dev->vhost_hlen, 0);
				vhost_log_cache_write_iova(dev, vq,
						buf_vec[0].buf_iova,
						dev->vhost_hlen);
			}

			hdr_addr = 0;
		}

		cpy_len = RTE_MIN(buf_avail, mbuf_avail);

		if (is_async) {
			if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
						buf_iova + buf_offset, cpy_len) < 0)
				goto error;
		} else {
			sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
					buf_addr + buf_offset,
					buf_iova + buf_offset, cpy_len);
		}

		mbuf_avail -= cpy_len;
		mbuf_offset += cpy_len;
		buf_avail -= cpy_len;
		buf_offset += cpy_len;
	}

	if (is_async)
		async_iter_finalize(async);

	return 0;

error:
	if (is_async)
		async_iter_cancel(async);

	return -1;
}

static __rte_always_inline int
vhost_enqueue_single_packed(struct virtio_net *dev,
			    struct vhost_virtqueue *vq,
			    struct rte_mbuf *pkt,
			    struct buf_vector *buf_vec,
			    uint16_t *nr_descs)
{
	uint16_t nr_vec = 0;
	uint16_t avail_idx = vq->last_avail_idx;
	uint16_t max_tries, tries = 0;
	uint16_t buf_id = 0;
	uint32_t len = 0;
	uint16_t desc_count;
	uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
	uint16_t num_buffers = 0;
	uint32_t buffer_len[vq->size];
	uint16_t buffer_buf_id[vq->size];
	uint16_t buffer_desc_count[vq->size];

	if (rxvq_is_mergeable(dev))
		max_tries = vq->size - 1;
	else
		max_tries = 1;

	while (size > 0) {
		/*
		 * if we tried all available ring items, and still
		 * can't get enough buf, it means something abnormal
		 * happened.
		 */
		if (unlikely(++tries > max_tries))
			return -1;

		if (unlikely(fill_vec_buf_packed(dev, vq,
						avail_idx, &desc_count,
						buf_vec, &nr_vec,
						&buf_id, &len,
						VHOST_ACCESS_RW) < 0))
			return -1;

		len = RTE_MIN(len, size);
		size -= len;

		buffer_len[num_buffers] = len;
		buffer_buf_id[num_buffers] = buf_id;
		buffer_desc_count[num_buffers] = desc_count;
		num_buffers += 1;

		*nr_descs += desc_count;
		avail_idx += desc_count;
		if (avail_idx >= vq->size)
			avail_idx -= vq->size;
	}

	if (mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers, false) < 0)
		return -1;

	vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
					   buffer_desc_count, num_buffers);

	return 0;
}

static __rte_noinline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mbuf **pkts, uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint16_t num_buffers;
	struct buf_vector buf_vec[BUF_VECTOR_MAX];
	uint16_t avail_head;

	/*
	 * The ordering between avail index and
	 * desc reads needs to be enforced.
	 */
	avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);

	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
		uint16_t nr_vec = 0;

		if (unlikely(reserve_avail_buf_split(dev, vq,
						pkt_len, buf_vec, &num_buffers,
						avail_head, &nr_vec) < 0)) {
			VHOST_LOG_DATA(DEBUG,
				"(%s) failed to get enough desc from vring\n",
				dev->ifname);
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
			dev->ifname, vq->last_avail_idx,
			vq->last_avail_idx + num_buffers);

		if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec,
					num_buffers, false) < 0) {
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		vq->last_avail_idx += num_buffers;
	}

	do_data_copy_enqueue(dev, vq);

	if (likely(vq->shadow_used_idx)) {
		flush_shadow_used_ring_split(dev, vq);
		vhost_vring_call_split(dev, vq);
	}

	return pkt_idx;
}

static __rte_always_inline int
virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
			   struct vhost_virtqueue *vq,
			   struct rte_mbuf **pkts,
			   uint64_t *desc_addrs,
			   uint64_t *lens)
{
	bool wrap_counter = vq->avail_wrap_counter;
	struct vring_packed_desc *descs = vq->desc_packed;
	uint16_t avail_idx = vq->last_avail_idx;
	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	uint16_t i;

	if (unlikely(avail_idx & PACKED_BATCH_MASK))
		return -1;

	if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
		return -1;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		if (unlikely(pkts[i]->next != NULL))
			return -1;
		if (unlikely(!desc_is_avail(&descs[avail_idx + i],
					    wrap_counter)))
			return -1;
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		lens[i] = descs[avail_idx + i].len;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
			return -1;
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		desc_addrs[i] = vhost_iova_to_vva(dev, vq,
						  descs[avail_idx + i].addr,
						  &lens[i],
						  VHOST_ACCESS_RW);

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		if (unlikely(!desc_addrs[i]))
			return -1;
		if (unlikely(lens[i] != descs[avail_idx + i].len))
			return -1;
	}

	return 0;
}

static __rte_always_inline void
virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
			   struct vhost_virtqueue *vq,
			   struct rte_mbuf **pkts,
			   uint64_t *desc_addrs,
			   uint64_t *lens)
{
	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
	struct vring_packed_desc *descs = vq->desc_packed;
	uint16_t avail_idx = vq->last_avail_idx;
	uint16_t ids[PACKED_BATCH_SIZE];
	uint16_t i;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
		hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
					(uintptr_t)desc_addrs[i];
		lens[i] = pkts[i]->pkt_len +
			sizeof(struct virtio_net_hdr_mrg_rxbuf);
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);

	vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
			   rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
			   pkts[i]->pkt_len);
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
					   lens[i]);

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		ids[i] = descs[avail_idx + i].id;

	vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
}

static __rte_always_inline int
virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
			   struct vhost_virtqueue *vq,
			   struct rte_mbuf **pkts)
{
	uint64_t desc_addrs[PACKED_BATCH_SIZE];
	uint64_t lens[PACKED_BATCH_SIZE];

	if (virtio_dev_rx_sync_batch_check(dev, vq, pkts, desc_addrs, lens) == -1)
		return -1;

	if (vq->shadow_used_idx) {
		do_data_copy_enqueue(dev, vq);
		vhost_flush_enqueue_shadow_packed(dev, vq);
	}

	virtio_dev_rx_batch_packed_copy(dev, vq, pkts, desc_addrs, lens);

	return 0;
}

static __rte_always_inline int16_t
virtio_dev_rx_single_packed(struct virtio_net *dev,
			    struct vhost_virtqueue *vq,
			    struct rte_mbuf *pkt)
{
	struct buf_vector buf_vec[BUF_VECTOR_MAX];
	uint16_t nr_descs = 0;

	if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
						 &nr_descs) < 0)) {
		VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
				dev->ifname);
		return -1;
	}

	VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
			dev->ifname, vq->last_avail_idx,
			vq->last_avail_idx + nr_descs);

	vq_inc_last_avail_packed(vq, nr_descs);

	return 0;
}

static __rte_noinline uint32_t
virtio_dev_rx_packed(struct virtio_net *dev,
		     struct vhost_virtqueue *__rte_restrict vq,
		     struct rte_mbuf **__rte_restrict pkts,
		     uint32_t count)
{
	uint32_t pkt_idx = 0;

	do {
		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);

		if (count - pkt_idx >= PACKED_BATCH_SIZE) {
			if (!virtio_dev_rx_sync_batch_packed(dev, vq,
							     &pkts[pkt_idx])) {
				pkt_idx += PACKED_BATCH_SIZE;
				continue;
			}
		}

		if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
			break;
		pkt_idx++;
	} while (pkt_idx < count);

	if (vq->shadow_used_idx) {
		do_data_copy_enqueue(dev, vq);
		vhost_flush_enqueue_shadow_packed(dev, vq);
	}

	if (pkt_idx)
		vhost_vring_call_packed(dev, vq);

	return pkt_idx;
}

static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
	struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_virtqueue *vq;
	uint32_t nb_tx = 0;

	VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
		VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
			dev->ifname, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];

	rte_spinlock_lock(&vq->access_lock);

	if (unlikely(!vq->enabled))
		goto out_access_unlock;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_lock(vq);

	if (unlikely(!vq->access_ok))
		if (unlikely(vring_translate(dev, vq) < 0))
			goto out;

	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
	if (count == 0)
		goto out;

	if (vq_is_packed(dev))
		nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
	else
		nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);

	vhost_queue_stats_update(dev, vq, pkts, nb_tx);

out:
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
	rte_spinlock_unlock(&vq->access_lock);

	return nb_tx;
}

uint16_t
rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
	struct rte_mbuf **__rte_restrict pkts, uint16_t count)
{
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return 0;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
			dev->ifname, __func__);
		return 0;
	}

	return virtio_dev_rx(dev, queue_id, pkts, count);
}
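
/*
 * Usage sketch (illustrative, not part of this file): a polling forwarder
 * pushing a received burst into a vhost-user port. VIRTIO_RXQ is a
 * hypothetical application-side queue index; `vid` comes from the
 * new_device() callback. Packets the guest could not take are freed.
 *
 *	uint16_t sent = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts, nb_rx);
 *	while (unlikely(sent < nb_rx))
 *		rte_pktmbuf_free(pkts[sent++]);
 */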

static __rte_always_inline uint16_t
async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
{
	struct vhost_async *async = vq->async;

	if (async->pkts_idx >= async->pkts_inflight_n)
		return async->pkts_idx - async->pkts_inflight_n;

	return vq->size - async->pkts_inflight_n + async->pkts_idx;
}

static __rte_always_inline void
store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem *d_ring,
		uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
{
	size_t elem_size = sizeof(struct vring_used_elem);

	if (d_idx + count <= ring_size) {
		rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
	} else {
		uint16_t size = ring_size - d_idx;

		rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
		rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
	}
}
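
/*
 * Worked example for the wrap-around branch above: with ring_size = 256,
 * d_idx = 250 and count = 10, the first rte_memcpy() fills entries 250..255
 * (size = 6) and the second wraps around to entries 0..3.
 */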

static __rte_always_inline void
store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,
		struct vring_used_elem_packed *d_ring,
		uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
{
	size_t elem_size = sizeof(struct vring_used_elem_packed);

	if (d_idx + count <= ring_size) {
		rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
	} else {
		uint16_t size = ring_size - d_idx;

		rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
		rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
	}
}

static __rte_noinline uint32_t
virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
		int16_t dma_id, uint16_t vchan_id)
{
	struct buf_vector buf_vec[BUF_VECTOR_MAX];
	uint32_t pkt_idx = 0;
	uint16_t num_buffers;
	uint16_t avail_head;

	struct vhost_async *async = vq->async;
	struct async_inflight_info *pkts_info = async->pkts_info;
	uint32_t pkt_err = 0;
	uint16_t n_xfer;
	uint16_t slot_idx = 0;

	/*
	 * The ordering between avail index and desc reads needs to be enforced.
	 */
	avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);

	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

	async_iter_reset(async);

	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
		uint16_t nr_vec = 0;

		if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
						&num_buffers, avail_head, &nr_vec) < 0)) {
			VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
					dev->ifname);
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
			dev->ifname, vq->last_avail_idx, vq->last_avail_idx + num_buffers);

		if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers, true) < 0) {
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
		pkts_info[slot_idx].descs = num_buffers;
		pkts_info[slot_idx].mbuf = pkts[pkt_idx];

		vq->last_avail_idx += num_buffers;
	}

	if (unlikely(pkt_idx == 0))
		return 0;

	n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
			async->iov_iter, pkt_idx);

	pkt_err = pkt_idx - n_xfer;
	if (unlikely(pkt_err)) {
		uint16_t num_descs = 0;

		VHOST_LOG_DATA(DEBUG, "(%s) %s: failed to transfer %u packets for queue %u.\n",
				dev->ifname, __func__, pkt_err, queue_id);

		/* update number of completed packets */
		pkt_idx = n_xfer;

		/* calculate the sum of descriptors to revert */
		while (pkt_err-- > 0) {
			num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
			slot_idx--;
		}

		/* recover shadow used ring and available ring */
		vq->shadow_used_idx -= num_descs;
		vq->last_avail_idx -= num_descs;
	}

	/* keep used descriptors */
	if (likely(vq->shadow_used_idx)) {
		uint16_t to = async->desc_idx_split & (vq->size - 1);

		store_dma_desc_info_split(vq->shadow_used_split,
				async->descs_split, vq->size, 0, to,
				vq->shadow_used_idx);

		async->desc_idx_split += vq->shadow_used_idx;

		async->pkts_idx += pkt_idx;
		if (async->pkts_idx >= vq->size)
			async->pkts_idx -= vq->size;

		async->pkts_inflight_n += pkt_idx;
		vq->shadow_used_idx = 0;
	}

	return pkt_idx;
}

static __rte_always_inline int
vhost_enqueue_async_packed(struct virtio_net *dev,
			   struct vhost_virtqueue *vq,
			   struct rte_mbuf *pkt,
			   struct buf_vector *buf_vec,
			   uint16_t *nr_descs,
			   uint16_t *nr_buffers)
{
	uint16_t nr_vec = 0;
	uint16_t avail_idx = vq->last_avail_idx;
	uint16_t max_tries, tries = 0;
	uint16_t buf_id = 0;
	uint32_t len = 0;
	uint16_t desc_count = 0;
	uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
	uint32_t buffer_len[vq->size];
	uint16_t buffer_buf_id[vq->size];
	uint16_t buffer_desc_count[vq->size];

	if (rxvq_is_mergeable(dev))
		max_tries = vq->size - 1;
	else
		max_tries = 1;

	while (size > 0) {
		/*
		 * if we tried all available ring items, and still
		 * can't get enough buf, it means something abnormal
		 * happened.
		 */
		if (unlikely(++tries > max_tries))
			return -1;

		if (unlikely(fill_vec_buf_packed(dev, vq,
						avail_idx, &desc_count,
						buf_vec, &nr_vec,
						&buf_id, &len,
						VHOST_ACCESS_RW) < 0))
			return -1;

		len = RTE_MIN(len, size);
		size -= len;

		buffer_len[*nr_buffers] = len;
		buffer_buf_id[*nr_buffers] = buf_id;
		buffer_desc_count[*nr_buffers] = desc_count;
		*nr_buffers += 1;

		*nr_descs += desc_count;
		avail_idx += desc_count;
		if (avail_idx >= vq->size)
			avail_idx -= vq->size;
	}

	if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0))
		return -1;

	vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);

	return 0;
}

static __rte_always_inline int16_t
virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
			   struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
{
	struct buf_vector buf_vec[BUF_VECTOR_MAX];

	if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec,
					nr_descs, nr_buffers) < 0)) {
		VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n", dev->ifname);
		return -1;
	}

	VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
			dev->ifname, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);

	return 0;
}

static __rte_always_inline void
dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
			uint32_t nr_err, uint32_t *pkt_idx)
{
	uint16_t descs_err = 0;
	uint16_t buffers_err = 0;
	struct async_inflight_info *pkts_info = vq->async->pkts_info;

	*pkt_idx -= nr_err;
	/* calculate the sum of buffers and descs of DMA-error packets. */
	while (nr_err-- > 0) {
		descs_err += pkts_info[slot_idx % vq->size].descs;
		buffers_err += pkts_info[slot_idx % vq->size].nr_buffers;
		slot_idx--;
	}

	if (vq->last_avail_idx >= descs_err) {
		vq->last_avail_idx -= descs_err;
	} else {
		vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
		vq->avail_wrap_counter ^= 1;
	}

	vq->shadow_used_idx -= buffers_err;
}

static __rte_noinline uint32_t
virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
		int16_t dma_id, uint16_t vchan_id)
{
	uint32_t pkt_idx = 0;
	uint32_t remained = count;
	uint16_t n_xfer;
	uint16_t num_buffers;
	uint16_t num_descs;

	struct vhost_async *async = vq->async;
	struct async_inflight_info *pkts_info = async->pkts_info;
	uint32_t pkt_err = 0;
	uint16_t slot_idx = 0;

	do {
		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);

		num_buffers = 0;
		num_descs = 0;
		if (unlikely(virtio_dev_rx_async_packed(dev, vq, pkts[pkt_idx],
						&num_descs, &num_buffers) < 0))
			break;

		slot_idx = (async->pkts_idx + pkt_idx) % vq->size;

		pkts_info[slot_idx].descs = num_descs;
		pkts_info[slot_idx].nr_buffers = num_buffers;
		pkts_info[slot_idx].mbuf = pkts[pkt_idx];

		pkt_idx++;
		remained--;
		vq_inc_last_avail_packed(vq, num_descs);
	} while (pkt_idx < count);

	if (unlikely(pkt_idx == 0))
		return 0;

	n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
			async->iov_iter, pkt_idx);

	async_iter_reset(async);

	pkt_err = pkt_idx - n_xfer;
	if (unlikely(pkt_err)) {
		VHOST_LOG_DATA(DEBUG, "(%s) %s: failed to transfer %u packets for queue %u.\n",
				dev->ifname, __func__, pkt_err, queue_id);
		dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
	}

	if (likely(vq->shadow_used_idx)) {
		/* keep used descriptors. */
		store_dma_desc_info_packed(vq->shadow_used_packed, async->buffers_packed,
					vq->size, 0, async->buffer_idx_packed,
					vq->shadow_used_idx);

		async->buffer_idx_packed += vq->shadow_used_idx;
		if (async->buffer_idx_packed >= vq->size)
			async->buffer_idx_packed -= vq->size;

		async->pkts_idx += pkt_idx;
		if (async->pkts_idx >= vq->size)
			async->pkts_idx -= vq->size;

		vq->shadow_used_idx = 0;
		async->pkts_inflight_n += pkt_idx;
	}

	return pkt_idx;
}

static __rte_always_inline void
write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
{
	struct vhost_async *async = vq->async;
	uint16_t nr_left = n_descs;
	uint16_t nr_copy;
	uint16_t to, from;

	do {
		from = async->last_desc_idx_split & (vq->size - 1);
		nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
		to = vq->last_used_idx & (vq->size - 1);

		if (to + nr_copy <= vq->size) {
			rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
					nr_copy * sizeof(struct vring_used_elem));
		} else {
			uint16_t size = vq->size - to;

			rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
					size * sizeof(struct vring_used_elem));
			rte_memcpy(&vq->used->ring[0], &async->descs_split[from + size],
					(nr_copy - size) * sizeof(struct vring_used_elem));
		}

		async->last_desc_idx_split += nr_copy;
		vq->last_used_idx += nr_copy;
		nr_left -= nr_copy;
	} while (nr_left > 0);
}

static __rte_always_inline void
write_back_completed_descs_packed(struct vhost_virtqueue *vq,
				uint16_t n_buffers)
{
	struct vhost_async *async = vq->async;
	uint16_t from = async->last_buffer_idx_packed;
	uint16_t used_idx = vq->last_used_idx;
	uint16_t head_idx = vq->last_used_idx;
	uint16_t head_flags = 0;
	uint16_t i;

	/* Split loop in two to save memory barriers */
	for (i = 0; i < n_buffers; i++) {
		vq->desc_packed[used_idx].id = async->buffers_packed[from].id;
		vq->desc_packed[used_idx].len = async->buffers_packed[from].len;

		used_idx += async->buffers_packed[from].count;
		if (used_idx >= vq->size)
			used_idx -= vq->size;

		from++;
		if (from >= vq->size)
			from = 0;
	}

	/* The ordering for storing desc flags needs to be enforced. */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	from = async->last_buffer_idx_packed;

	for (i = 0; i < n_buffers; i++) {
		uint16_t flags;

		if (async->buffers_packed[from].len)
			flags = VRING_DESC_F_WRITE;
		else
			flags = 0;

		if (vq->used_wrap_counter) {
			flags |= VRING_DESC_F_USED;
			flags |= VRING_DESC_F_AVAIL;
		} else {
			flags &= ~VRING_DESC_F_USED;
			flags &= ~VRING_DESC_F_AVAIL;
		}

		if (i > 0) {
			vq->desc_packed[vq->last_used_idx].flags = flags;
		} else {
			head_idx = vq->last_used_idx;
			head_flags = flags;
		}

		vq_inc_last_used_packed(vq, async->buffers_packed[from].count);

		from++;
		if (from == vq->size)
			from = 0;
	}

	vq->desc_packed[head_idx].flags = head_flags;
	async->last_buffer_idx_packed = from;
}

static __rte_always_inline uint16_t
vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
		uint16_t vchan_id)
{
	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
	struct vhost_async *async = vq->async;
	struct async_inflight_info *pkts_info = async->pkts_info;
	uint16_t nr_cpl_pkts = 0;
	uint16_t n_descs = 0, n_buffers = 0;
	uint16_t start_idx, from, i;

	/* Check completed copies for the given DMA vChannel */
	vhost_async_dma_check_completed(dev, dma_id, vchan_id, VHOST_DMA_MAX_COPY_COMPLETE);

	start_idx = async_get_first_inflight_pkt_idx(vq);
	/*
	 * Calculate the number of copy-completed packets. Note that there
	 * may be completed packets even if no copies are reported done by
	 * the given DMA vChannel, as it's possible that a virtqueue uses
	 * multiple DMA vChannels.
	 */
	from = start_idx;
	while (vq->async->pkts_cmpl_flag[from] && count--) {
		vq->async->pkts_cmpl_flag[from] = false;
		from++;
		if (from >= vq->size)
			from -= vq->size;
		nr_cpl_pkts++;
	}

	if (nr_cpl_pkts == 0)
		return 0;

	for (i = 0; i < nr_cpl_pkts; i++) {
		from = (start_idx + i) % vq->size;
		/* Only used with packed ring */
		n_buffers += pkts_info[from].nr_buffers;
		/* Only used with split ring */
		n_descs += pkts_info[from].descs;
		pkts[i] = pkts_info[from].mbuf;
	}

	async->pkts_inflight_n -= nr_cpl_pkts;

	if (likely(vq->enabled && vq->access_ok)) {
		if (vq_is_packed(dev)) {
			write_back_completed_descs_packed(vq, n_buffers);
			vhost_vring_call_packed(dev, vq);
		} else {
			write_back_completed_descs_split(vq, n_descs);
			__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
			vhost_vring_call_split(dev, vq);
		}
	} else {
		if (vq_is_packed(dev)) {
			async->last_buffer_idx_packed += n_buffers;
			if (async->last_buffer_idx_packed >= vq->size)
				async->last_buffer_idx_packed -= vq->size;
		} else {
			async->last_desc_idx_split += n_descs;
		}
	}

	return nr_cpl_pkts;
}

uint16_t
rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
		uint16_t vchan_id)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;
	uint16_t n_pkts_cpl = 0;

	if (unlikely(!dev))
		return 0;

	VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
		VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
			dev->ifname, __func__, queue_id);
		return 0;
	}

	if (unlikely(!dma_copy_track[dma_id].vchans ||
				!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
		VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
				dma_id, vchan_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];

	if (!rte_spinlock_trylock(&vq->access_lock)) {
		VHOST_LOG_DATA(DEBUG, "(%s) %s: virtqueue %u is busy.\n", dev->ifname, __func__,
				queue_id);
		return 0;
	}

	if (unlikely(!vq->async)) {
		VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for virtqueue %d.\n",
				dev->ifname, __func__, queue_id);
		goto out;
	}

	n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);

	vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
	vq->stats.inflight_completed += n_pkts_cpl;

out:
	rte_spinlock_unlock(&vq->access_lock);

	return n_pkts_cpl;
}

uint16_t
rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
		uint16_t vchan_id)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;
	uint16_t n_pkts_cpl = 0;

	if (!dev)
		return 0;

	VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
		VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
			dev->ifname, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];

	if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
		VHOST_LOG_DATA(ERR, "(%s) %s() called without access lock taken.\n",
				dev->ifname, __func__);
		return -1;
	}

	if (unlikely(!vq->async)) {
		VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
			dev->ifname, __func__, queue_id);
		return 0;
	}

	if (unlikely(!dma_copy_track[dma_id].vchans ||
				!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
		VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
				dma_id, vchan_id);
		return 0;
	}

	n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);

	vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
	vq->stats.inflight_completed += n_pkts_cpl;

	return n_pkts_cpl;
}

static __rte_always_inline uint32_t
virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
{
	struct vhost_virtqueue *vq;
	uint32_t nb_tx = 0;

	VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
		VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
			dev->ifname, __func__, queue_id);
		return 0;
	}

	if (unlikely(!dma_copy_track[dma_id].vchans ||
				!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
		VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
				dma_id, vchan_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];

	rte_spinlock_lock(&vq->access_lock);

	if (unlikely(!vq->enabled || !vq->async))
		goto out_access_unlock;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_lock(vq);

	if (unlikely(!vq->access_ok))
		if (unlikely(vring_translate(dev, vq) < 0))
			goto out;

	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
	if (count == 0)
		goto out;

	if (vq_is_packed(dev))
		nb_tx = virtio_dev_rx_async_submit_packed(dev, vq, queue_id,
				pkts, count, dma_id, vchan_id);
	else
		nb_tx = virtio_dev_rx_async_submit_split(dev, vq, queue_id,
				pkts, count, dma_id, vchan_id);

	vq->stats.inflight_submitted += nb_tx;

out:
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
	rte_spinlock_unlock(&vq->access_lock);

	return nb_tx;
}

uint16_t
rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
		uint16_t vchan_id)
{
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return 0;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
			dev->ifname, __func__);
		return 0;
	}

	return virtio_dev_rx_async_submit(dev, queue_id, pkts, count, dma_id, vchan_id);
}
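
/*
 * Async usage sketch (illustrative, not part of this file): submit a burst
 * through the DMA engine, then poll for completions before freeing mbufs.
 * VIRTIO_RXQ is a hypothetical application-side queue index; dma_id and
 * vchan_id identify a previously configured DMA vChannel.
 *
 *	uint16_t n_enq = rte_vhost_submit_enqueue_burst(vid, VIRTIO_RXQ,
 *			pkts, nb_rx, dma_id, vchan_id);
 *	...
 *	uint16_t n_cpl = rte_vhost_poll_enqueue_completed(vid, VIRTIO_RXQ,
 *			cpl_pkts, MAX_PKT_BURST, dma_id, vchan_id);
 *	while (n_cpl-- > 0)
 *		rte_pktmbuf_free(cpl_pkts[n_cpl]);
 */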

static inline bool
virtio_net_with_host_offload(struct virtio_net *dev)
{
	if (dev->features &
			((1ULL << VIRTIO_NET_F_CSUM) |
			 (1ULL << VIRTIO_NET_F_HOST_ECN) |
			 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
			 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
			 (1ULL << VIRTIO_NET_F_HOST_UFO)))
		return true;

	return false;
}

static int
parse_headers(struct rte_mbuf *m, uint8_t *l4_proto)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_ether_hdr *eth_hdr;
	uint16_t ethertype;
	uint16_t data_len = rte_pktmbuf_data_len(m);

	if (data_len < sizeof(struct rte_ether_hdr))
		return -EINVAL;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	m->l2_len = sizeof(struct rte_ether_hdr);
	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);

	if (ethertype == RTE_ETHER_TYPE_VLAN) {
		if (data_len < sizeof(struct rte_ether_hdr) +
				sizeof(struct rte_vlan_hdr))
			goto error;

		struct rte_vlan_hdr *vlan_hdr =
			(struct rte_vlan_hdr *)(eth_hdr + 1);

		m->l2_len += sizeof(struct rte_vlan_hdr);
		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
	}

	switch (ethertype) {
	case RTE_ETHER_TYPE_IPV4:
		if (data_len < m->l2_len + sizeof(struct rte_ipv4_hdr))
			goto error;
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
				m->l2_len);
		m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
		if (data_len < m->l2_len + m->l3_len)
			goto error;
		m->ol_flags |= RTE_MBUF_F_TX_IPV4;
		*l4_proto = ipv4_hdr->next_proto_id;
		break;
	case RTE_ETHER_TYPE_IPV6:
		if (data_len < m->l2_len + sizeof(struct rte_ipv6_hdr))
			goto error;
		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
				m->l2_len);
		m->l3_len = sizeof(struct rte_ipv6_hdr);
		m->ol_flags |= RTE_MBUF_F_TX_IPV6;
		*l4_proto = ipv6_hdr->proto;
		break;
	default:
		/* a valid L3 header is needed for further L4 parsing */
		goto error;
	}

	/* both CSUM and GSO need a valid L4 header */
	switch (*l4_proto) {
	case IPPROTO_TCP:
		if (data_len < m->l2_len + m->l3_len +
				sizeof(struct rte_tcp_hdr))
			goto error;
		break;
	case IPPROTO_UDP:
		if (data_len < m->l2_len + m->l3_len +
				sizeof(struct rte_udp_hdr))
			goto error;
		break;
	case IPPROTO_SCTP:
		if (data_len < m->l2_len + m->l3_len +
				sizeof(struct rte_sctp_hdr))
			goto error;
		break;
	default:
		goto error;
	}

	return 0;

error:
	m->l2_len = 0;
	m->l3_len = 0;
	m->ol_flags = 0;
	return -EINVAL;
}
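
/*
 * Worked example of the virtio csum fields handled below (virtio 1.1 spec,
 * sec. 5.1.6.2): for TCP over IPv4 without VLAN, csum_start = l2_len +
 * l3_len = 14 + 20 = 34 and csum_offset = offsetof(struct rte_tcp_hdr,
 * cksum) = 16, i.e. the checksum word lives at byte 50 of the frame.
 */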
static __rte_always_inline void
vhost_dequeue_offload_legacy(struct virtio_net *dev, struct virtio_net_hdr *hdr,
		struct rte_mbuf *m)
{
	uint8_t l4_proto = 0;
	struct rte_tcp_hdr *tcp_hdr = NULL;
	uint16_t tcp_len;
	uint16_t data_len = rte_pktmbuf_data_len(m);

	if (parse_headers(m, &l4_proto) < 0)
		return;

	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
			switch (hdr->csum_offset) {
			case (offsetof(struct rte_tcp_hdr, cksum)):
				if (l4_proto != IPPROTO_TCP)
					goto error;
				m->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
				break;
			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
				if (l4_proto != IPPROTO_UDP)
					goto error;
				m->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
				break;
			case (offsetof(struct rte_sctp_hdr, cksum)):
				if (l4_proto != IPPROTO_SCTP)
					goto error;
				m->ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
				break;
			default:
				goto error;
			}
		} else {
			goto error;
		}
	}

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			if (l4_proto != IPPROTO_TCP)
				goto error;
			tcp_hdr = rte_pktmbuf_mtod_offset(m,
					struct rte_tcp_hdr *,
					m->l2_len + m->l3_len);
			tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
			if (data_len < m->l2_len + m->l3_len + tcp_len)
				goto error;
			m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
			m->tso_segsz = hdr->gso_size;
			m->l4_len = tcp_len;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			if (l4_proto != IPPROTO_UDP)
				goto error;
			m->ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
			m->tso_segsz = hdr->gso_size;
			m->l4_len = sizeof(struct rte_udp_hdr);
			break;
		default:
			VHOST_LOG_DATA(WARNING, "(%s) unsupported gso type %u.\n",
					dev->ifname, hdr->gso_type);
			goto error;
		}
	}
	return;

error:
	m->l2_len = 0;
	m->l3_len = 0;
	m->ol_flags = 0;
}
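
/*
 * Dispatch virtio-net header processing either to the legacy Tx-flags
 * conversion above, or to the Virtio spec-compliant path that reports
 * checksum and LRO state through RTE_MBUF_F_RX_* flags and falls back to
 * a software checksum for protocols the header parser does not recognize.
 */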
static __rte_always_inline void
vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr,
		struct rte_mbuf *m, bool legacy_ol_flags)
{
	struct rte_net_hdr_lens hdr_lens;
	int l4_supported = 0;
	uint32_t ptype;

	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
		return;

	if (legacy_ol_flags) {
		vhost_dequeue_offload_legacy(dev, hdr, m);
		return;
	}

	m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;

	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
	m->packet_type = ptype;
	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
		l4_supported = 1;

	/* According to Virtio 1.1 spec, the device only needs to look at
	 * VIRTIO_NET_HDR_F_NEEDS_CSUM in the packet transmission path.
	 * This differs from the incoming packet processing path, where the
	 * driver may rely on the VIRTIO_NET_HDR_F_DATA_VALID flag set by
	 * the device.
	 *
	 * 5.1.6.2.1 Driver Requirements: Packet Transmission
	 * The driver MUST NOT set the VIRTIO_NET_HDR_F_DATA_VALID and
	 * VIRTIO_NET_HDR_F_RSC_INFO bits in flags.
	 *
	 * 5.1.6.2.2 Device Requirements: Packet Transmission
	 * The device MUST ignore flag bits that it does not recognize.
	 */
	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		uint32_t hdrlen;

		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
		if (hdr->csum_start <= hdrlen && l4_supported != 0) {
			m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
		} else {
			/* Unknown proto or tunnel, do sw cksum. We can assume
			 * the cksum field is in the first segment since the
			 * buffers we provided to the host are large enough.
			 * In case of SCTP, this will be wrong since it's a CRC
			 * but there's nothing we can do.
			 */
			uint16_t csum = 0, off;

			if (rte_raw_cksum_mbuf(m, hdr->csum_start,
					rte_pktmbuf_pkt_len(m) - hdr->csum_start,
					&csum) < 0)
				return;
			if (likely(csum != 0xffff))
				csum = ~csum;
			off = hdr->csum_offset + hdr->csum_start;
			if (rte_pktmbuf_data_len(m) >= off + 1)
				*rte_pktmbuf_mtod_offset(m, uint16_t *, off) = csum;
		}
	}

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		if (hdr->gso_size == 0)
			return;

		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_TCP)
				break;
			m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
			m->tso_segsz = hdr->gso_size;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_UDP)
				break;
			m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
			m->tso_segsz = hdr->gso_size;
			break;
		default:
			break;
		}
	}
}
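
/*
 * Gather a virtio-net header that the guest scattered across several
 * descriptors into a contiguous local copy, so the offload logic above
 * can read it through a plain pointer.
 */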
static __rte_noinline void
copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
		struct buf_vector *buf_vec)
{
	uint64_t len, src;
	uint64_t remain = sizeof(struct virtio_net_hdr);
	uint64_t dst = (uint64_t)(uintptr_t)hdr;

	while (remain) {
		len = RTE_MIN(remain, buf_vec->buf_len);
		src = buf_vec->buf_addr;
		rte_memcpy((void *)(uintptr_t)dst,
				(void *)(uintptr_t)src, len);
		remain -= len;
		dst += len;
		buf_vec++;
	}
}
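
/*
 * Copy one guest packet out of a descriptor chain into an mbuf chain,
 * allocating additional mbufs from mbuf_pool as segments fill up. The
 * header-bearing first segment and copies larger than MAX_BATCH_LEN are
 * done inline; other small copies are deferred into the vq batch-copy
 * array. Returns 0 on success, -1 on a malformed chain or mbuf
 * allocation failure.
 */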
static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
		  struct buf_vector *buf_vec, uint16_t nr_vec,
		  struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
		  bool legacy_ol_flags)
{
	uint32_t buf_avail, buf_offset;
	uint64_t buf_addr, buf_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct rte_mbuf *cur = m, *prev = m;
	struct virtio_net_hdr tmp_hdr;
	struct virtio_net_hdr *hdr = NULL;
	/* A counter to avoid desc dead loop chain */
	uint16_t vec_idx = 0;
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
	int error = 0;

	buf_addr = buf_vec[vec_idx].buf_addr;
	buf_len = buf_vec[vec_idx].buf_len;

	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
		error = -1;
		goto out;
	}

	if (virtio_net_with_host_offload(dev)) {
		if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
			/*
			 * No luck, the virtio-net header doesn't fit
			 * in a contiguous virtual area.
			 */
			copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
			hdr = &tmp_hdr;
		} else {
			hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
		}
	}

	/*
	 * A virtio driver normally uses at least 2 desc buffers
	 * for Tx: the first for storing the header, and others
	 * for storing the data.
	 */
	if (unlikely(buf_len < dev->vhost_hlen)) {
		buf_offset = dev->vhost_hlen - buf_len;
		vec_idx++;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_len = buf_vec[vec_idx].buf_len;
		buf_avail = buf_len - buf_offset;
	} else if (buf_len == dev->vhost_hlen) {
		if (unlikely(++vec_idx >= nr_vec))
			goto out;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_len = buf_vec[vec_idx].buf_len;

		buf_offset = 0;
		buf_avail = buf_len;
	} else {
		buf_offset = dev->vhost_hlen;
		buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
	}

	PRINT_PACKET(dev,
			(uintptr_t)(buf_addr + buf_offset),
			(uint32_t)buf_avail, 0);

	mbuf_offset = 0;
	mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
	while (1) {
		cpy_len = RTE_MIN(buf_avail, mbuf_avail);

		if (likely(cpy_len > MAX_BATCH_LEN ||
					vq->batch_copy_nb_elems >= vq->size ||
					(hdr && cur == m))) {
			rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
						mbuf_offset),
					(void *)((uintptr_t)(buf_addr +
							buf_offset)), cpy_len);
		} else {
			batch_copy[vq->batch_copy_nb_elems].dst =
				rte_pktmbuf_mtod_offset(cur, void *,
						mbuf_offset);
			batch_copy[vq->batch_copy_nb_elems].src =
				(void *)((uintptr_t)(buf_addr + buf_offset));
			batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
			vq->batch_copy_nb_elems++;
		}

		mbuf_avail -= cpy_len;
		mbuf_offset += cpy_len;
		buf_avail -= cpy_len;
		buf_offset += cpy_len;

		/* This buf has reached its end, get the next one */
		if (buf_avail == 0) {
			if (++vec_idx >= nr_vec)
				break;

			buf_addr = buf_vec[vec_idx].buf_addr;
			buf_len = buf_vec[vec_idx].buf_len;

			buf_offset = 0;
			buf_avail = buf_len;

			PRINT_PACKET(dev, (uintptr_t)buf_addr,
					(uint32_t)buf_avail, 0);
		}

		/*
		 * This mbuf has reached its end, get a new one
		 * to hold more data.
		 */
		if (mbuf_avail == 0) {
			cur = rte_pktmbuf_alloc(mbuf_pool);
			if (unlikely(cur == NULL)) {
				VHOST_LOG_DATA(ERR, "(%s) failed to allocate memory for mbuf.\n",
						dev->ifname);
				error = -1;
				goto out;
			}

			prev->next = cur;
			prev->data_len = mbuf_offset;
			m->nb_segs += 1;
			m->pkt_len += mbuf_offset;
			prev = cur;

			mbuf_offset = 0;
			mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
		}
	}

	prev->data_len = mbuf_offset;
	m->pkt_len += mbuf_offset;

	if (hdr)
		vhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);

out:
	return error;
}
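
/*
 * Helpers for attaching an externally allocated buffer to an mbuf when a
 * packet does not fit in the mempool's data room: a single rte_malloc'd
 * area holds the headroom, the packet data and the shared info used to
 * free the buffer once the last mbuf reference is gone.
 */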
static void
virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
{
	rte_free(opaque);
}

static int
virtio_dev_extbuf_alloc(struct virtio_net *dev, struct rte_mbuf *pkt, uint32_t size)
{
	struct rte_mbuf_ext_shared_info *shinfo = NULL;
	uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
	uint16_t buf_len;
	rte_iova_t iova;
	void *buf;

	total_len += sizeof(*shinfo) + sizeof(uintptr_t);
	total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
	if (unlikely(total_len > UINT16_MAX))
		return -ENOSPC;

	buf_len = total_len;
	buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
	if (unlikely(buf == NULL))
		return -ENOMEM;

	/* Initialize shinfo */
	shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
				virtio_dev_extbuf_free, buf);
	if (unlikely(shinfo == NULL)) {
		rte_free(buf);
		VHOST_LOG_DATA(ERR, "(%s) failed to init shinfo\n", dev->ifname);
		return -1;
	}

	iova = rte_malloc_virt2iova(buf);
	rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
	rte_pktmbuf_reset_headroom(pkt);

	return 0;
}

/*
 * Prepare a host supported pktmbuf: ensure the mbuf can hold data_len
 * bytes, attaching an external buffer when the tailroom is too small.
 */
static __rte_always_inline int
virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
		uint32_t data_len)
{
	if (rte_pktmbuf_tailroom(pkt) >= data_len)
		return 0;

	/* attach an external buffer if supported */
	if (dev->extbuf && !virtio_dev_extbuf_alloc(dev, pkt, data_len))
		return 0;

	/* check if chained buffers are allowed */
	if (!dev->linearbuf)
		return 0;

	return -1;
}
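
/*
 * Dequeue a burst from a split virtqueue: read the avail index with
 * acquire semantics, bulk-allocate the destination mbufs, then copy each
 * descriptor chain out and publish the used entries through the shadow
 * used ring before kicking the guest.
 */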
__rte_always_inline
static uint16_t
virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
	bool legacy_ol_flags)
{
	uint16_t i;
	uint16_t free_entries;
	uint16_t dropped = 0;
	static bool allocerr_warned;

	/*
	 * The ordering between avail index and
	 * desc reads needs to be enforced.
	 */
	free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
			vq->last_avail_idx;
	if (free_entries == 0)
		return 0;

	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

	VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);

	count = RTE_MIN(count, MAX_PKT_BURST);
	count = RTE_MIN(count, free_entries);
	VHOST_LOG_DATA(DEBUG, "(%s) about to dequeue %u buffers\n",
			dev->ifname, count);

	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
		return 0;

	for (i = 0; i < count; i++) {
		struct buf_vector buf_vec[BUF_VECTOR_MAX];
		uint16_t head_idx;
		uint32_t buf_len;
		uint16_t nr_vec = 0;
		int err;

		if (unlikely(fill_vec_buf_split(dev, vq,
						vq->last_avail_idx + i,
						&nr_vec, buf_vec,
						&head_idx, &buf_len,
						VHOST_ACCESS_RO) < 0))
			break;

		update_shadow_used_ring_split(vq, head_idx, 0);

		err = virtio_dev_pktmbuf_prep(dev, pkts[i], buf_len);
		if (unlikely(err)) {
			/*
			 * mbuf allocation fails for jumbo packets when external
			 * buffer allocation is not allowed and linear buffer
			 * is required. Drop this packet.
			 */
			if (!allocerr_warned) {
				VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
					dev->ifname, buf_len, mbuf_pool->name);
				allocerr_warned = true;
			}
			dropped += 1;
			i++;
			break;
		}

		err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
				mbuf_pool, legacy_ol_flags);
		if (unlikely(err)) {
			if (!allocerr_warned) {
				VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
					dev->ifname);
				allocerr_warned = true;
			}
			dropped += 1;
			i++;
			break;
		}
	}

	if (dropped)
		rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1);

	vq->last_avail_idx += i;

	do_data_copy_dequeue(vq);
	if (unlikely(i < count))
		vq->shadow_used_idx = i;
	if (likely(vq->shadow_used_idx)) {
		flush_shadow_used_ring_split(dev, vq);
		vhost_vring_call_split(dev, vq);
	}

	return (i - dropped);
}
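
/*
 * Thin wrappers: virtio_dev_tx_split() is always inlined into each, so
 * the compiler can emit one specialized copy of the hot loop per ol_flags
 * convention instead of testing legacy_ol_flags per packet.
 */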
__rte_noinline
static uint16_t
virtio_dev_tx_split_legacy(struct virtio_net *dev,
	struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
	struct rte_mbuf **pkts, uint16_t count)
{
	return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
}

__rte_noinline
static uint16_t
virtio_dev_tx_split_compliant(struct virtio_net *dev,
	struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
	struct rte_mbuf **pkts, uint16_t count)
{
	return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
}
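
/*
 * Validate a full batch of packed-ring descriptors before touching any
 * data: the batch must be aligned and in range, every descriptor must
 * match the current wrap counter and be a single direct descriptor, and
 * every buffer must translate to a host address and fit in its mbuf.
 */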
static __rte_always_inline int
vhost_reserve_avail_batch_packed(struct virtio_net *dev,
				 struct vhost_virtqueue *vq,
				 struct rte_mbuf **pkts,
				 uint16_t avail_idx,
				 uintptr_t *desc_addrs,
				 uint16_t *ids)
{
	bool wrap = vq->avail_wrap_counter;
	struct vring_packed_desc *descs = vq->desc_packed;
	uint64_t lens[PACKED_BATCH_SIZE];
	uint64_t buf_lens[PACKED_BATCH_SIZE];
	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	uint16_t flags, i;

	if (unlikely(avail_idx & PACKED_BATCH_MASK))
		return -1;
	if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
		return -1;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		flags = descs[avail_idx + i].flags;
		if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
			     (wrap == !!(flags & VRING_DESC_F_USED)) ||
			     (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
			return -1;
	}

	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		lens[i] = descs[avail_idx + i].len;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		desc_addrs[i] = vhost_iova_to_vva(dev, vq,
				descs[avail_idx + i].addr,
				&lens[i], VHOST_ACCESS_RW);
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		if (unlikely(!desc_addrs[i]))
			return -1;
		if (unlikely((lens[i] != descs[avail_idx + i].len)))
			return -1;
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
			return -1;
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
			return -1;
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		pkts[i]->pkt_len = lens[i] - buf_offset;
		pkts[i]->data_len = pkts[i]->pkt_len;
		ids[i] = descs[avail_idx + i].id;
	}

	return 0;
}

static __rte_always_inline int
virtio_dev_tx_batch_packed(struct virtio_net *dev,
			   struct vhost_virtqueue *vq,
			   struct rte_mbuf **pkts,
			   bool legacy_ol_flags)
{
	uint16_t avail_idx = vq->last_avail_idx;
	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	struct virtio_net_hdr *hdr;
	uintptr_t desc_addrs[PACKED_BATCH_SIZE];
	uint16_t ids[PACKED_BATCH_SIZE];
	uint16_t i;

	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
					     desc_addrs, ids))
		return -1;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
			(void *)(uintptr_t)(desc_addrs[i] + buf_offset),
			pkts[i]->pkt_len);

	if (virtio_net_with_host_offload(dev)) {
		vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
			hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
			vhost_dequeue_offload(dev, hdr, pkts[i], legacy_ol_flags);
		}
	}

	if (virtio_net_is_inorder(dev))
		vhost_shadow_dequeue_batch_packed_inorder(vq,
			ids[PACKED_BATCH_SIZE - 1]);
	else
		vhost_shadow_dequeue_batch_packed(dev, vq, ids);

	vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);

	return 0;
}
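
/*
 * Fallback for descriptors that cannot go through the batched path:
 * dequeue a single packet, possibly spanning a chain of descriptors.
 */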
static __rte_always_inline int
vhost_dequeue_single_packed(struct virtio_net *dev,
			    struct vhost_virtqueue *vq,
			    struct rte_mempool *mbuf_pool,
			    struct rte_mbuf *pkts,
			    uint16_t *buf_id,
			    uint16_t *desc_count,
			    bool legacy_ol_flags)
{
	struct buf_vector buf_vec[BUF_VECTOR_MAX];
	uint32_t buf_len;
	uint16_t nr_vec = 0;
	int err;
	static bool allocerr_warned;

	if (unlikely(fill_vec_buf_packed(dev, vq,
					vq->last_avail_idx, desc_count,
					buf_vec, &nr_vec,
					buf_id, &buf_len,
					VHOST_ACCESS_RO) < 0))
		return -1;

	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
		if (!allocerr_warned) {
			VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
				dev->ifname, buf_len, mbuf_pool->name);
			allocerr_warned = true;
		}
		return -1;
	}

	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
			mbuf_pool, legacy_ol_flags);
	if (unlikely(err)) {
		if (!allocerr_warned) {
			VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
				dev->ifname);
			allocerr_warned = true;
		}
		return -1;
	}

	return 0;
}

static __rte_always_inline int
virtio_dev_tx_single_packed(struct virtio_net *dev,
			    struct vhost_virtqueue *vq,
			    struct rte_mempool *mbuf_pool,
			    struct rte_mbuf *pkts,
			    bool legacy_ol_flags)
{
	int ret;
	uint16_t buf_id, desc_count = 0;

	ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
					&desc_count, legacy_ol_flags);

	if (likely(desc_count > 0)) {
		if (virtio_net_is_inorder(dev))
			vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
					desc_count);
		else
			vhost_shadow_dequeue_single_packed(vq, buf_id,
					desc_count);

		vq_inc_last_avail_packed(vq, desc_count);
	}

	return ret;
}

__rte_always_inline
static uint16_t
virtio_dev_tx_packed(struct virtio_net *dev,
		struct vhost_virtqueue *__rte_restrict vq,
		struct rte_mempool *mbuf_pool,
		struct rte_mbuf **__rte_restrict pkts,
		uint32_t count,
		bool legacy_ol_flags)
{
	uint32_t pkt_idx = 0;

	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
		return 0;

	do {
		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);

		if (count - pkt_idx >= PACKED_BATCH_SIZE) {
			if (!virtio_dev_tx_batch_packed(dev, vq,
						&pkts[pkt_idx], legacy_ol_flags)) {
				pkt_idx += PACKED_BATCH_SIZE;
				continue;
			}
		}

		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
					pkts[pkt_idx], legacy_ol_flags))
			break;
		pkt_idx++;
	} while (pkt_idx < count);

	if (pkt_idx != count)
		rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);

	if (vq->shadow_used_idx) {
		do_data_copy_dequeue(vq);
		vhost_flush_dequeue_shadow_packed(dev, vq);
		vhost_vring_call_packed(dev, vq);
	}

	return pkt_idx;
}

__rte_noinline
static uint16_t
virtio_dev_tx_packed_legacy(struct virtio_net *dev,
	struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
	struct rte_mbuf **__rte_restrict pkts, uint32_t count)
{
	return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
}

__rte_noinline
static uint16_t
virtio_dev_tx_packed_compliant(struct virtio_net *dev,
	struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
	struct rte_mbuf **__rte_restrict pkts, uint32_t count)
{
	return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
}
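
/*
 * Public dequeue entry point: receive up to "count" packets sent by the
 * guest on "queue_id" into "pkts", allocating mbufs from "mbuf_pool".
 * Returns the number of packets actually dequeued, including a possibly
 * injected RARP announcement after live migration.
 */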
uint16_t
rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	struct virtio_net *dev;
	struct rte_mbuf *rarp_mbuf = NULL;
	struct vhost_virtqueue *vq;
	int16_t success = 1;

	dev = get_device(vid);
	if (!dev)
		return 0;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
				dev->ifname, __func__);
		return 0;
	}

	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
		VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
				dev->ifname, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];

	if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
		return 0;

	if (unlikely(!vq->enabled)) {
		count = 0;
		goto out_access_unlock;
	}

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_lock(vq);

	if (unlikely(!vq->access_ok))
		if (unlikely(vring_translate(dev, vq) < 0)) {
			count = 0;
			goto out;
		}

	/*
	 * Construct a RARP broadcast packet and inject it into the "pkts"
	 * array, so it looks as if the guest itself sent it.
	 *
	 * Check user_send_rarp() for more information.
	 *
	 * broadcast_rarp shares a cacheline in the virtio_net structure
	 * with some fields that are accessed during enqueue, and
	 * __atomic_compare_exchange_n performs a write when it executes the
	 * compare-and-exchange. This could result in false sharing between
	 * enqueue and dequeue.
	 *
	 * Prevent unnecessary false sharing by reading broadcast_rarp first,
	 * and only performing the compare-and-exchange if the read indicates
	 * it is likely to be set.
	 */
	if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
			__atomic_compare_exchange_n(&dev->broadcast_rarp,
			&success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {

		rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
		if (rarp_mbuf == NULL) {
			VHOST_LOG_DATA(ERR, "(%s) failed to make RARP packet.\n",
					dev->ifname);
			count = 0;
			goto out;
		}
		/*
		 * Inject it to the head of "pkts" array, so that switch's mac
		 * learning table will get updated first.
		 */
		pkts[0] = rarp_mbuf;
		vhost_queue_stats_update(dev, vq, pkts, 1);
		pkts++;
		count -= 1;
	}

	if (vq_is_packed(dev)) {
		if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
			count = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
		else
			count = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
	} else {
		if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
			count = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
		else
			count = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
	}

	vhost_queue_stats_update(dev, vq, pkts, count);

out:
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
	rte_spinlock_unlock(&vq->access_lock);

	if (unlikely(rarp_mbuf != NULL))
		count += 1;

	return count;
}
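
/*
 * Illustrative usage sketch (not part of this file): a minimal polling
 * loop that drains the guest Tx queue and forwards to an ethdev port.
 * "vid", "mp", "port_id" and the burst size are application-defined
 * assumptions; dequeue queue indexes must be odd (guest Tx), e.g. the
 * VIRTIO_TXQ macro used by the vhost example application.
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nr, sent;
 *
 *	nr = rte_vhost_dequeue_burst(vid, VIRTIO_TXQ, mp, pkts,
 *			MAX_PKT_BURST);
 *	if (nr > 0) {
 *		sent = rte_eth_tx_burst(port_id, 0, pkts, nr);
 *		if (sent < nr)
 *			rte_pktmbuf_free_bulk(&pkts[sent], nr - sent);
 *	}
 */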