1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
7 #include <linux/virtio_net.h>
10 #include <rte_memcpy.h>
12 #include <rte_ether.h>
14 #include <rte_vhost.h>
19 #include <rte_spinlock.h>
20 #include <rte_malloc.h>
21 #include <rte_vhost_async.h>
26 #define MAX_BATCH_LEN 256
28 #define VHOST_ASYNC_BATCH_THRESHOLD 32
30 static __rte_always_inline bool
31 rxvq_is_mergeable(struct virtio_net *dev)
33 return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
36 static __rte_always_inline bool
37 virtio_net_is_inorder(struct virtio_net *dev)
39 return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
43 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
45 return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
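/*
 * Flush the copies batched on the enqueue path: perform the pending
 * memcpy()s, log the guest writes for live migration and reset the
 * batch counter.
 */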
49 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
51 struct batch_copy_elem *elem = vq->batch_copy_elems;
52 uint16_t count = vq->batch_copy_nb_elems;
55 for (i = 0; i < count; i++) {
56 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
57 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
59 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
62 vq->batch_copy_nb_elems = 0;
66 do_data_copy_dequeue(struct vhost_virtqueue *vq)
68 struct batch_copy_elem *elem = vq->batch_copy_elems;
69 uint16_t count = vq->batch_copy_nb_elems;
72 for (i = 0; i < count; i++)
73 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
75 vq->batch_copy_nb_elems = 0;
78 static __rte_always_inline void
79 do_flush_shadow_used_ring_split(struct virtio_net *dev,
80 struct vhost_virtqueue *vq,
81 uint16_t to, uint16_t from, uint16_t size)
83 rte_memcpy(&vq->used->ring[to],
84 &vq->shadow_used_split[from],
85 size * sizeof(struct vring_used_elem));
86 vhost_log_cache_used_vring(dev, vq,
87 offsetof(struct vring_used, ring[to]),
88 size * sizeof(struct vring_used_elem));
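/*
 * Flush the shadow used ring to the guest-visible used ring (in two
 * chunks when it wraps), sync the dirty-page log cache and publish the
 * new used index with a release store.
 */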
91 static __rte_always_inline void
92 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
94 uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
96 if (used_idx + vq->shadow_used_idx <= vq->size) {
97 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
102 /* update the used ring interval [used_idx, vq->size) */
103 size = vq->size - used_idx;
104 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
106 /* then update the wrapped-around interval [0, shadow_used_idx - size) */
107 do_flush_shadow_used_ring_split(dev, vq, 0, size,
108 vq->shadow_used_idx - size);
110 vq->last_used_idx += vq->shadow_used_idx;
112 vhost_log_cache_sync(dev, vq);
114 __atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
116 vq->shadow_used_idx = 0;
117 vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
118 sizeof(vq->used->idx));
121 static __rte_always_inline void
122 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
123 uint16_t desc_idx, uint32_t len)
125 uint16_t i = vq->shadow_used_idx++;
127 vq->shadow_used_split[i].id = desc_idx;
128 vq->shadow_used_split[i].len = len;
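/*
 * Write the shadowed used entries back to the packed descriptor ring:
 * ids and lengths first, then the flags after a release fence, with the
 * head descriptor's flags stored last so the guest never observes a
 * partially updated chain.
 */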
131 static __rte_always_inline void
132 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
133 struct vhost_virtqueue *vq)
136 uint16_t used_idx = vq->last_used_idx;
137 uint16_t head_idx = vq->last_used_idx;
138 uint16_t head_flags = 0;
140 /* Split loop in two to save memory barriers */
141 for (i = 0; i < vq->shadow_used_idx; i++) {
142 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
143 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
145 used_idx += vq->shadow_used_packed[i].count;
146 if (used_idx >= vq->size)
147 used_idx -= vq->size;
150 /* The ordering for storing desc flags needs to be enforced. */
151 rte_atomic_thread_fence(__ATOMIC_RELEASE);
153 for (i = 0; i < vq->shadow_used_idx; i++) {
156 if (vq->shadow_used_packed[i].len)
157 flags = VRING_DESC_F_WRITE;
161 if (vq->used_wrap_counter) {
162 flags |= VRING_DESC_F_USED;
163 flags |= VRING_DESC_F_AVAIL;
165 flags &= ~VRING_DESC_F_USED;
166 flags &= ~VRING_DESC_F_AVAIL;
170 vq->desc_packed[vq->last_used_idx].flags = flags;
172 vhost_log_cache_used_vring(dev, vq,
174 sizeof(struct vring_packed_desc),
175 sizeof(struct vring_packed_desc));
177 head_idx = vq->last_used_idx;
181 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
184 vq->desc_packed[head_idx].flags = head_flags;
186 vhost_log_cache_used_vring(dev, vq,
188 sizeof(struct vring_packed_desc),
189 sizeof(struct vring_packed_desc));
191 vq->shadow_used_idx = 0;
192 vhost_log_cache_sync(dev, vq);
195 static __rte_always_inline void
196 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
197 struct vhost_virtqueue *vq)
199 struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
201 vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
202 /* the desc flags are the synchronization point for the virtio packed vring */
203 __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
204 used_elem->flags, __ATOMIC_RELEASE);
206 vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
207 sizeof(struct vring_packed_desc),
208 sizeof(struct vring_packed_desc));
209 vq->shadow_used_idx = 0;
210 vhost_log_cache_sync(dev, vq);
213 static __rte_always_inline void
214 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
215 struct vhost_virtqueue *vq,
221 uint16_t last_used_idx;
222 struct vring_packed_desc *desc_base;
224 last_used_idx = vq->last_used_idx;
225 desc_base = &vq->desc_packed[last_used_idx];
227 flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
229 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
230 desc_base[i].id = ids[i];
231 desc_base[i].len = lens[i];
234 rte_atomic_thread_fence(__ATOMIC_RELEASE);
236 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
237 desc_base[i].flags = flags;
240 vhost_log_cache_used_vring(dev, vq, last_used_idx *
241 sizeof(struct vring_packed_desc),
242 sizeof(struct vring_packed_desc) *
244 vhost_log_cache_sync(dev, vq);
246 vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
249 static __rte_always_inline void
250 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
253 vq->shadow_used_packed[0].id = id;
255 if (!vq->shadow_used_idx) {
256 vq->shadow_last_used_idx = vq->last_used_idx;
257 vq->shadow_used_packed[0].flags =
258 PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
259 vq->shadow_used_packed[0].len = 0;
260 vq->shadow_used_packed[0].count = 1;
261 vq->shadow_used_idx++;
264 vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
267 static __rte_always_inline void
268 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
269 struct vhost_virtqueue *vq,
276 flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
278 if (!vq->shadow_used_idx) {
279 vq->shadow_last_used_idx = vq->last_used_idx;
280 vq->shadow_used_packed[0].id = ids[0];
281 vq->shadow_used_packed[0].len = 0;
282 vq->shadow_used_packed[0].count = 1;
283 vq->shadow_used_packed[0].flags = flags;
284 vq->shadow_used_idx++;
289 vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
290 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
291 vq->desc_packed[vq->last_used_idx + i].len = 0;
294 rte_atomic_thread_fence(__ATOMIC_RELEASE);
295 vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
296 vq->desc_packed[vq->last_used_idx + i].flags = flags;
298 vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
299 sizeof(struct vring_packed_desc),
300 sizeof(struct vring_packed_desc) *
302 vhost_log_cache_sync(dev, vq);
304 vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
307 static __rte_always_inline void
308 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
314 flags = vq->desc_packed[vq->last_used_idx].flags;
315 if (vq->used_wrap_counter) {
316 flags |= VRING_DESC_F_USED;
317 flags |= VRING_DESC_F_AVAIL;
319 flags &= ~VRING_DESC_F_USED;
320 flags &= ~VRING_DESC_F_AVAIL;
323 if (!vq->shadow_used_idx) {
324 vq->shadow_last_used_idx = vq->last_used_idx;
326 vq->shadow_used_packed[0].id = buf_id;
327 vq->shadow_used_packed[0].len = 0;
328 vq->shadow_used_packed[0].flags = flags;
329 vq->shadow_used_idx++;
331 vq->desc_packed[vq->last_used_idx].id = buf_id;
332 vq->desc_packed[vq->last_used_idx].len = 0;
333 vq->desc_packed[vq->last_used_idx].flags = flags;
336 vq_inc_last_used_packed(vq, count);
339 static __rte_always_inline void
340 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
346 vq->shadow_used_packed[0].id = buf_id;
348 flags = vq->desc_packed[vq->last_used_idx].flags;
349 if (vq->used_wrap_counter) {
350 flags |= VRING_DESC_F_USED;
351 flags |= VRING_DESC_F_AVAIL;
353 flags &= ~VRING_DESC_F_USED;
354 flags &= ~VRING_DESC_F_AVAIL;
357 if (!vq->shadow_used_idx) {
358 vq->shadow_last_used_idx = vq->last_used_idx;
359 vq->shadow_used_packed[0].len = 0;
360 vq->shadow_used_packed[0].flags = flags;
361 vq->shadow_used_idx++;
364 vq_inc_last_used_packed(vq, count);
367 static __rte_always_inline void
368 vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
372 uint16_t num_buffers)
376 for (i = 0; i < num_buffers; i++) {
377 /* keep the enqueue shadow flush point aligned with the batch size */
378 if (!vq->shadow_used_idx)
379 vq->shadow_aligned_idx = vq->last_used_idx &
381 vq->shadow_used_packed[vq->shadow_used_idx].id = id[i];
382 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
383 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
384 vq->shadow_aligned_idx += count[i];
385 vq->shadow_used_idx++;
389 static __rte_always_inline void
390 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
391 struct vhost_virtqueue *vq,
395 uint16_t num_buffers)
397 vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
399 if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
400 do_data_copy_enqueue(dev, vq);
401 vhost_flush_enqueue_shadow_packed(dev, vq);
405 /* skip the write when the value is already set, to lessen cache pressure */
406 #define ASSIGN_UNLESS_EQUAL(var, val) do { \
407 if ((var) != (val)) \
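/*
 * Translate the mbuf Tx offload requests into the virtio-net header:
 * L4 checksum offload (csum_start/csum_offset), TSO/UFO
 * (gso_type/gso_size/hdr_len), and compute the IPv4 header checksum in
 * software when requested.
 */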
411 static __rte_always_inline void
412 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
414 uint64_t csum_l4 = m_buf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
416 if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
417 csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;
420 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
421 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
424 case RTE_MBUF_F_TX_TCP_CKSUM:
425 net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
428 case RTE_MBUF_F_TX_UDP_CKSUM:
429 net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
432 case RTE_MBUF_F_TX_SCTP_CKSUM:
433 net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
438 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
439 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
440 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
443 /* IP cksum verification cannot be bypassed, so calculate it here */
444 if (m_buf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
445 struct rte_ipv4_hdr *ipv4_hdr;
447 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
449 ipv4_hdr->hdr_checksum = 0;
450 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
453 if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
454 if (m_buf->ol_flags & RTE_MBUF_F_TX_IPV4)
455 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
457 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
458 net_hdr->gso_size = m_buf->tso_segsz;
459 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
461 } else if (m_buf->ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
462 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
463 net_hdr->gso_size = m_buf->tso_segsz;
464 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
467 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
468 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
469 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
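/*
 * Map one guest descriptor (IOVA + length) to host virtual addresses
 * and append the resulting chunk(s) to buf_vec; fails when the vector
 * is full or the address cannot be translated.
 */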
473 static __rte_always_inline int
474 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
475 struct buf_vector *buf_vec, uint16_t *vec_idx,
476 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
478 uint16_t vec_id = *vec_idx;
482 uint64_t desc_chunck_len = desc_len;
484 if (unlikely(vec_id >= BUF_VECTOR_MAX))
487 desc_addr = vhost_iova_to_vva(dev, vq,
491 if (unlikely(!desc_addr))
494 rte_prefetch0((void *)(uintptr_t)desc_addr);
496 buf_vec[vec_id].buf_iova = desc_iova;
497 buf_vec[vec_id].buf_addr = desc_addr;
498 buf_vec[vec_id].buf_len = desc_chunck_len;
500 desc_len -= desc_chunck_len;
501 desc_iova += desc_chunck_len;
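/*
 * Walk the split-ring descriptor chain referenced by the given avail
 * ring slot and record every buffer in buf_vec, dereferencing indirect
 * tables (and copying them when they are not contiguous in host VA).
 * Reports the chain head index and the total buffer length.
 */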
509 static __rte_always_inline int
510 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
511 uint32_t avail_idx, uint16_t *vec_idx,
512 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
513 uint32_t *desc_chain_len, uint8_t perm)
515 uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
516 uint16_t vec_id = *vec_idx;
519 uint32_t nr_descs = vq->size;
521 struct vring_desc *descs = vq->desc;
522 struct vring_desc *idesc = NULL;
524 if (unlikely(idx >= vq->size))
527 *desc_chain_head = idx;
529 if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
530 dlen = vq->desc[idx].len;
531 nr_descs = dlen / sizeof(struct vring_desc);
532 if (unlikely(nr_descs > vq->size))
535 descs = (struct vring_desc *)(uintptr_t)
536 vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
539 if (unlikely(!descs))
542 if (unlikely(dlen < vq->desc[idx].len)) {
544 * The indirect desc table is not contiguous
545 * in the process VA space, so we have to copy it.
547 idesc = vhost_alloc_copy_ind_table(dev, vq,
548 vq->desc[idx].addr, vq->desc[idx].len);
549 if (unlikely(!idesc))
559 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
560 free_ind_table(idesc);
564 dlen = descs[idx].len;
567 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
568 descs[idx].addr, dlen,
570 free_ind_table(idesc);
574 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
577 idx = descs[idx].next;
580 *desc_chain_len = len;
583 if (unlikely(!!idesc))
584 free_ind_table(idesc);
590 /* Reserve enough available buffers to hold the packet. Returns -1 on fail, 0 on success. */
593 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
594 uint32_t size, struct buf_vector *buf_vec,
595 uint16_t *num_buffers, uint16_t avail_head,
599 uint16_t vec_idx = 0;
600 uint16_t max_tries, tries = 0;
602 uint16_t head_idx = 0;
606 cur_idx = vq->last_avail_idx;
608 if (rxvq_is_mergeable(dev))
609 max_tries = vq->size - 1;
614 if (unlikely(cur_idx == avail_head))
617 * if we tried all available ring items and still
618 * can't get enough buffers, something abnormal happened
621 if (unlikely(++tries > max_tries))
624 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
627 VHOST_ACCESS_RW) < 0))
629 len = RTE_MIN(len, size);
630 update_shadow_used_ring_split(vq, head_idx, len);
642 static __rte_always_inline int
643 fill_vec_buf_packed_indirect(struct virtio_net *dev,
644 struct vhost_virtqueue *vq,
645 struct vring_packed_desc *desc, uint16_t *vec_idx,
646 struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
650 uint16_t vec_id = *vec_idx;
652 struct vring_packed_desc *descs, *idescs = NULL;
655 descs = (struct vring_packed_desc *)(uintptr_t)
656 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
657 if (unlikely(!descs))
660 if (unlikely(dlen < desc->len)) {
662 * The indirect desc table is not contiguous
663 * in the process VA space, so we have to copy it.
665 idescs = vhost_alloc_copy_ind_table(dev,
666 vq, desc->addr, desc->len);
667 if (unlikely(!idescs))
673 nr_descs = desc->len / sizeof(struct vring_packed_desc);
674 if (unlikely(nr_descs >= vq->size)) {
675 free_ind_table(idescs);
679 for (i = 0; i < nr_descs; i++) {
680 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
681 free_ind_table(idescs);
687 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
694 if (unlikely(!!idescs))
695 free_ind_table(idescs);
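/*
 * Collect one packed-ring descriptor chain starting at avail_idx into
 * buf_vec, resolving indirect descriptors, and report the buffer id,
 * the accumulated length and the number of descriptors consumed.
 */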
700 static __rte_always_inline int
701 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
702 uint16_t avail_idx, uint16_t *desc_count,
703 struct buf_vector *buf_vec, uint16_t *vec_idx,
704 uint16_t *buf_id, uint32_t *len, uint8_t perm)
706 bool wrap_counter = vq->avail_wrap_counter;
707 struct vring_packed_desc *descs = vq->desc_packed;
708 uint16_t vec_id = *vec_idx;
711 if (avail_idx < vq->last_avail_idx)
715 /* Perform a load-acquire barrier in desc_is_avail to
716 * enforce the ordering between desc flags and desc content. */
719 if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
726 if (unlikely(vec_id >= BUF_VECTOR_MAX))
729 if (unlikely(*desc_count >= vq->size))
733 *buf_id = descs[avail_idx].id;
735 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
736 if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
742 dlen = descs[avail_idx].len;
745 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
746 descs[avail_idx].addr,
752 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
755 if (++avail_idx >= vq->size) {
756 avail_idx -= vq->size;
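/*
 * Used when the first guest buffer is too small to hold the virtio-net
 * header contiguously: copy the header chunk by chunk across the
 * leading buffers of the chain and log the writes.
 */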
766 static __rte_noinline void
767 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
768 struct buf_vector *buf_vec,
769 struct virtio_net_hdr_mrg_rxbuf *hdr)
772 uint64_t remain = dev->vhost_hlen;
773 uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
774 uint64_t iova = buf_vec->buf_iova;
777 len = RTE_MIN(remain,
779 dst = buf_vec->buf_addr;
780 rte_memcpy((void *)(uintptr_t)dst,
781 (void *)(uintptr_t)src,
784 PRINT_PACKET(dev, (uintptr_t)dst,
786 vhost_log_cache_write_iova(dev, vq,
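/*
 * Synchronous enqueue copy: write the virtio-net header, then copy the
 * mbuf chain into the guest buffers listed in buf_vec, batching small
 * copies through vq->batch_copy_elems.
 */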
796 static __rte_always_inline int
797 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
798 struct rte_mbuf *m, struct buf_vector *buf_vec,
799 uint16_t nr_vec, uint16_t num_buffers)
801 uint32_t vec_idx = 0;
802 uint32_t mbuf_offset, mbuf_avail;
803 uint32_t buf_offset, buf_avail;
804 uint64_t buf_addr, buf_iova, buf_len;
807 struct rte_mbuf *hdr_mbuf;
808 struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
809 struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
812 if (unlikely(m == NULL)) {
817 buf_addr = buf_vec[vec_idx].buf_addr;
818 buf_iova = buf_vec[vec_idx].buf_iova;
819 buf_len = buf_vec[vec_idx].buf_len;
821 if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
828 if (unlikely(buf_len < dev->vhost_hlen)) {
829 memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
832 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
834 VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
835 dev->vid, num_buffers);
837 if (unlikely(buf_len < dev->vhost_hlen)) {
838 buf_offset = dev->vhost_hlen - buf_len;
840 buf_addr = buf_vec[vec_idx].buf_addr;
841 buf_iova = buf_vec[vec_idx].buf_iova;
842 buf_len = buf_vec[vec_idx].buf_len;
843 buf_avail = buf_len - buf_offset;
845 buf_offset = dev->vhost_hlen;
846 buf_avail = buf_len - dev->vhost_hlen;
849 mbuf_avail = rte_pktmbuf_data_len(m);
851 while (mbuf_avail != 0 || m->next != NULL) {
852 /* done with current buf, get the next one */
853 if (buf_avail == 0) {
855 if (unlikely(vec_idx >= nr_vec)) {
860 buf_addr = buf_vec[vec_idx].buf_addr;
861 buf_iova = buf_vec[vec_idx].buf_iova;
862 buf_len = buf_vec[vec_idx].buf_len;
868 /* done with current mbuf, get the next one */
869 if (mbuf_avail == 0) {
873 mbuf_avail = rte_pktmbuf_data_len(m);
877 virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
878 if (rxvq_is_mergeable(dev))
879 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
882 if (unlikely(hdr == &tmp_hdr)) {
883 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
885 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
887 vhost_log_cache_write_iova(dev, vq,
895 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
897 if (likely(cpy_len > MAX_BATCH_LEN ||
898 vq->batch_copy_nb_elems >= vq->size)) {
899 rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
900 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
902 vhost_log_cache_write_iova(dev, vq,
903 buf_iova + buf_offset,
905 PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
908 batch_copy[vq->batch_copy_nb_elems].dst =
909 (void *)((uintptr_t)(buf_addr + buf_offset));
910 batch_copy[vq->batch_copy_nb_elems].src =
911 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
912 batch_copy[vq->batch_copy_nb_elems].log_addr =
913 buf_iova + buf_offset;
914 batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
915 vq->batch_copy_nb_elems++;
918 mbuf_avail -= cpy_len;
919 mbuf_offset += cpy_len;
920 buf_avail -= cpy_len;
921 buf_offset += cpy_len;
929 static __rte_always_inline void
930 async_fill_vec(struct iovec *v, void *base, size_t len)
936 static __rte_always_inline void
937 async_fill_iter(struct rte_vhost_iov_iter *it, size_t count,
938 struct iovec *vec, unsigned long nr_seg)
945 it->nr_segs = nr_seg;
952 static __rte_always_inline void
953 async_fill_desc(struct rte_vhost_async_desc *desc,
954 struct rte_vhost_iov_iter *src, struct rte_vhost_iov_iter *dst)
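/*
 * Async counterpart of copy_mbuf_to_desc(): the CPU only writes the
 * virtio-net header; the payload is described by source (mbuf) and
 * destination (guest buffer translated to host PA) iovecs that are
 * later handed to the DMA engine.
 */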
960 static __rte_always_inline int
961 async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
962 struct rte_mbuf *m, struct buf_vector *buf_vec,
963 uint16_t nr_vec, uint16_t num_buffers,
964 struct iovec *src_iovec, struct iovec *dst_iovec,
965 struct rte_vhost_iov_iter *src_it,
966 struct rte_vhost_iov_iter *dst_it)
968 struct rte_mbuf *hdr_mbuf;
969 struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
970 uint64_t buf_addr, buf_iova;
973 uint32_t vec_idx = 0;
974 uint32_t mbuf_offset, mbuf_avail;
975 uint32_t buf_offset, buf_avail;
976 uint32_t cpy_len, buf_len;
983 if (unlikely(m == NULL)) {
988 buf_addr = buf_vec[vec_idx].buf_addr;
989 buf_iova = buf_vec[vec_idx].buf_iova;
990 buf_len = buf_vec[vec_idx].buf_len;
992 if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
999 if (unlikely(buf_len < dev->vhost_hlen)) {
1000 memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
1003 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
1005 VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
1006 dev->vid, num_buffers);
1008 if (unlikely(buf_len < dev->vhost_hlen)) {
1009 buf_offset = dev->vhost_hlen - buf_len;
1011 buf_addr = buf_vec[vec_idx].buf_addr;
1012 buf_iova = buf_vec[vec_idx].buf_iova;
1013 buf_len = buf_vec[vec_idx].buf_len;
1014 buf_avail = buf_len - buf_offset;
1016 buf_offset = dev->vhost_hlen;
1017 buf_avail = buf_len - dev->vhost_hlen;
1020 mbuf_avail = rte_pktmbuf_data_len(m);
1023 while (mbuf_avail != 0 || m->next != NULL) {
1024 /* done with current buf, get the next one */
1025 if (buf_avail == 0) {
1027 if (unlikely(vec_idx >= nr_vec)) {
1032 buf_addr = buf_vec[vec_idx].buf_addr;
1033 buf_iova = buf_vec[vec_idx].buf_iova;
1034 buf_len = buf_vec[vec_idx].buf_len;
1037 buf_avail = buf_len;
1040 /* done with current mbuf, get the next one */
1041 if (mbuf_avail == 0) {
1045 mbuf_avail = rte_pktmbuf_data_len(m);
1049 virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
1050 if (rxvq_is_mergeable(dev))
1051 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
1054 if (unlikely(hdr == &tmp_hdr)) {
1055 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
1057 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
1058 dev->vhost_hlen, 0);
1059 vhost_log_cache_write_iova(dev, vq,
1060 buf_vec[0].buf_iova,
1067 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1069 while (unlikely(cpy_len)) {
1070 hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
1071 buf_iova + buf_offset,
1072 cpy_len, &mapped_len);
1073 if (unlikely(!hpa)) {
1074 VHOST_LOG_DATA(ERR, "(%d) %s: failed to get hpa.\n",
1075 dev->vid, __func__);
1080 async_fill_vec(src_iovec + tvec_idx,
1081 (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
1082 mbuf_offset), (size_t)mapped_len);
1083 async_fill_vec(dst_iovec + tvec_idx,
1084 hpa, (size_t)mapped_len);
1086 tlen += (uint32_t)mapped_len;
1087 cpy_len -= (uint32_t)mapped_len;
1088 mbuf_avail -= (uint32_t)mapped_len;
1089 mbuf_offset += (uint32_t)mapped_len;
1090 buf_avail -= (uint32_t)mapped_len;
1091 buf_offset += (uint32_t)mapped_len;
1096 async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
1097 async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
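/*
 * Reserve enough packed-ring descriptors for one packet, copy the
 * packet into them and record the buffers in the shadow used ring.
 */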
1102 static __rte_always_inline int
1103 vhost_enqueue_single_packed(struct virtio_net *dev,
1104 struct vhost_virtqueue *vq,
1105 struct rte_mbuf *pkt,
1106 struct buf_vector *buf_vec,
1109 uint16_t nr_vec = 0;
1110 uint16_t avail_idx = vq->last_avail_idx;
1111 uint16_t max_tries, tries = 0;
1112 uint16_t buf_id = 0;
1114 uint16_t desc_count;
1115 uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
1116 uint16_t num_buffers = 0;
1117 uint32_t buffer_len[vq->size];
1118 uint16_t buffer_buf_id[vq->size];
1119 uint16_t buffer_desc_count[vq->size];
1121 if (rxvq_is_mergeable(dev))
1122 max_tries = vq->size - 1;
1128 * if we tried all available ring items and still
1129 * can't get enough buffers, something abnormal happened
1132 if (unlikely(++tries > max_tries))
1135 if (unlikely(fill_vec_buf_packed(dev, vq,
1136 avail_idx, &desc_count,
1139 VHOST_ACCESS_RW) < 0))
1142 len = RTE_MIN(len, size);
1145 buffer_len[num_buffers] = len;
1146 buffer_buf_id[num_buffers] = buf_id;
1147 buffer_desc_count[num_buffers] = desc_count;
1150 *nr_descs += desc_count;
1151 avail_idx += desc_count;
1152 if (avail_idx >= vq->size)
1153 avail_idx -= vq->size;
1156 if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
1159 vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
1160 buffer_desc_count, num_buffers);
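/*
 * Split-ring enqueue path: reserve available buffers for each packet,
 * copy the packets in, then flush the shadow used ring and notify the
 * guest if needed.
 */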
1165 static __rte_noinline uint32_t
1166 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1167 struct rte_mbuf **pkts, uint32_t count)
1169 uint32_t pkt_idx = 0;
1170 uint16_t num_buffers;
1171 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1172 uint16_t avail_head;
1175 * The ordering between avail index and
1176 * desc reads needs to be enforced.
1178 avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1180 rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1182 for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1183 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1184 uint16_t nr_vec = 0;
1186 if (unlikely(reserve_avail_buf_split(dev, vq,
1187 pkt_len, buf_vec, &num_buffers,
1188 avail_head, &nr_vec) < 0)) {
1189 VHOST_LOG_DATA(DEBUG,
1190 "(%d) failed to get enough desc from vring\n",
1192 vq->shadow_used_idx -= num_buffers;
1196 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1197 dev->vid, vq->last_avail_idx,
1198 vq->last_avail_idx + num_buffers);
1200 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1203 vq->shadow_used_idx -= num_buffers;
1207 vq->last_avail_idx += num_buffers;
1210 do_data_copy_enqueue(dev, vq);
1212 if (likely(vq->shadow_used_idx)) {
1213 flush_shadow_used_ring_split(dev, vq);
1214 vhost_vring_call_split(dev, vq);
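/*
 * Check whether a full batch of PACKED_BATCH_SIZE packets can be
 * enqueued at once: the descriptors must be available and batch-aligned,
 * each packet must be a single mbuf that fits in one descriptor, and
 * every buffer address must translate to contiguous host VA.
 */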
1220 static __rte_always_inline int
1221 virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
1222 struct vhost_virtqueue *vq,
1223 struct rte_mbuf **pkts,
1224 uint64_t *desc_addrs,
1227 bool wrap_counter = vq->avail_wrap_counter;
1228 struct vring_packed_desc *descs = vq->desc_packed;
1229 uint16_t avail_idx = vq->last_avail_idx;
1230 uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1233 if (unlikely(avail_idx & PACKED_BATCH_MASK))
1236 if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1239 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1240 if (unlikely(pkts[i]->next != NULL))
1242 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1247 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1248 lens[i] = descs[avail_idx + i].len;
1250 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1251 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1255 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1256 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1257 descs[avail_idx + i].addr,
1261 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1262 if (unlikely(!desc_addrs[i]))
1264 if (unlikely(lens[i] != descs[avail_idx + i].len))
1271 static __rte_always_inline void
1272 virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
1273 struct vhost_virtqueue *vq,
1274 struct rte_mbuf **pkts,
1275 uint64_t *desc_addrs,
1278 uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1279 struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1280 struct vring_packed_desc *descs = vq->desc_packed;
1281 uint16_t avail_idx = vq->last_avail_idx;
1282 uint16_t ids[PACKED_BATCH_SIZE];
1285 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1286 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1287 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1288 (uintptr_t)desc_addrs[i];
1289 lens[i] = pkts[i]->pkt_len +
1290 sizeof(struct virtio_net_hdr_mrg_rxbuf);
1293 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1294 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1296 vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1298 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1299 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1300 rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1304 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1305 vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
1308 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1309 ids[i] = descs[avail_idx + i].id;
1311 vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1314 static __rte_always_inline int
1315 virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
1316 struct vhost_virtqueue *vq,
1317 struct rte_mbuf **pkts)
1319 uint64_t desc_addrs[PACKED_BATCH_SIZE];
1320 uint64_t lens[PACKED_BATCH_SIZE];
1322 if (virtio_dev_rx_sync_batch_check(dev, vq, pkts, desc_addrs, lens) == -1)
1325 if (vq->shadow_used_idx) {
1326 do_data_copy_enqueue(dev, vq);
1327 vhost_flush_enqueue_shadow_packed(dev, vq);
1330 virtio_dev_rx_batch_packed_copy(dev, vq, pkts, desc_addrs, lens);
1335 static __rte_always_inline int16_t
1336 virtio_dev_rx_single_packed(struct virtio_net *dev,
1337 struct vhost_virtqueue *vq,
1338 struct rte_mbuf *pkt)
1340 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1341 uint16_t nr_descs = 0;
1343 if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1345 VHOST_LOG_DATA(DEBUG,
1346 "(%d) failed to get enough desc from vring\n",
1351 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1352 dev->vid, vq->last_avail_idx,
1353 vq->last_avail_idx + nr_descs);
1355 vq_inc_last_avail_packed(vq, nr_descs);
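/*
 * Packed-ring enqueue path: try the batched fast path first and fall
 * back to per-packet enqueue, then flush the shadow ring and kick the
 * guest.
 */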
1360 static __rte_noinline uint32_t
1361 virtio_dev_rx_packed(struct virtio_net *dev,
1362 struct vhost_virtqueue *__rte_restrict vq,
1363 struct rte_mbuf **__rte_restrict pkts,
1366 uint32_t pkt_idx = 0;
1369 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1371 if (count - pkt_idx >= PACKED_BATCH_SIZE) {
1372 if (!virtio_dev_rx_sync_batch_packed(dev, vq,
1374 pkt_idx += PACKED_BATCH_SIZE;
1379 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1383 } while (pkt_idx < count);
1385 if (vq->shadow_used_idx) {
1386 do_data_copy_enqueue(dev, vq);
1387 vhost_flush_enqueue_shadow_packed(dev, vq);
1391 vhost_vring_call_packed(dev, vq);
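/*
 * Common enqueue entry point: validate the queue index, take the vq
 * access lock (and the IOTLB read lock for IOMMU-enabled devices),
 * translate the rings if needed, then dispatch to the packed or split
 * implementation.
 */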
1396 static __rte_always_inline uint32_t
1397 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1398 struct rte_mbuf **pkts, uint32_t count)
1400 struct vhost_virtqueue *vq;
1403 VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1404 if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1405 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1406 dev->vid, __func__, queue_id);
1410 vq = dev->virtqueue[queue_id];
1412 rte_spinlock_lock(&vq->access_lock);
1414 if (unlikely(!vq->enabled))
1415 goto out_access_unlock;
1417 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1418 vhost_user_iotlb_rd_lock(vq);
1420 if (unlikely(!vq->access_ok))
1421 if (unlikely(vring_translate(dev, vq) < 0))
1424 count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1428 if (vq_is_packed(dev))
1429 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1431 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1434 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1435 vhost_user_iotlb_rd_unlock(vq);
1438 rte_spinlock_unlock(&vq->access_lock);
1444 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1445 struct rte_mbuf **__rte_restrict pkts, uint16_t count)
1447 struct virtio_net *dev = get_device(vid);
1452 if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1454 "(%d) %s: built-in vhost net backend is disabled.\n",
1455 dev->vid, __func__);
1459 return virtio_dev_rx(dev, queue_id, pkts, count);
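/*
 * Minimal usage sketch for the synchronous enqueue API (assumption:
 * "vid" comes from the application's new_device callback and queue 0 is
 * the first guest RX queue):
 *
 *	uint16_t sent = rte_vhost_enqueue_burst(vid, 0, mbufs, nb_pkts);
 *	// the data is copied into guest buffers, so the caller keeps
 *	// ownership of the mbufs and may free them right away
 *	for (uint16_t i = 0; i < nb_pkts; i++)
 *		rte_pktmbuf_free(mbufs[i]);
 */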
1462 static __rte_always_inline uint16_t
1463 virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
1464 uint16_t vq_size, uint16_t n_inflight)
1466 return pkts_idx > n_inflight ? (pkts_idx - n_inflight) :
1467 (vq_size - n_inflight + pkts_idx) % vq_size;
1470 static __rte_always_inline void
1471 store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem *d_ring,
1472 uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
1474 size_t elem_size = sizeof(struct vring_used_elem);
1476 if (d_idx + count <= ring_size) {
1477 rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
1479 uint16_t size = ring_size - d_idx;
1481 rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
1482 rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
1486 static __rte_always_inline void
1487 store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,
1488 struct vring_used_elem_packed *d_ring,
1489 uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
1491 size_t elem_size = sizeof(struct vring_used_elem_packed);
1493 if (d_idx + count <= ring_size) {
1494 rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
1496 uint16_t size = ring_size - d_idx;
1498 rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
1499 rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
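/*
 * Async split-ring enqueue: reserve buffers and build DMA descriptors
 * for each packet, submit bursts to the async channel, and roll back
 * the avail/shadow indexes for packets the channel did not accept. The
 * shadow used entries are stashed in the async ring until completions
 * are polled.
 */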
1503 static __rte_noinline uint32_t
1504 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
1505 struct vhost_virtqueue *vq, uint16_t queue_id,
1506 struct rte_mbuf **pkts, uint32_t count)
1508 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1509 uint32_t pkt_idx = 0, pkt_burst_idx = 0;
1510 uint16_t num_buffers;
1511 uint16_t avail_head;
1513 struct vhost_async *async = vq->async;
1514 struct rte_vhost_iov_iter *it_pool = async->it_pool;
1515 struct iovec *vec_pool = async->vec_pool;
1516 struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
1517 struct iovec *src_iovec = vec_pool;
1518 struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
1519 struct async_inflight_info *pkts_info = async->pkts_info;
1520 uint32_t n_pkts = 0, pkt_err = 0;
1522 uint16_t segs_await = 0;
1523 uint16_t iovec_idx = 0, it_idx = 0, slot_idx = 0;
1526 /* The ordering between avail index and desc reads needs to be enforced. */
1528 avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1530 rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1532 for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1533 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1534 uint16_t nr_vec = 0;
1536 if (unlikely(reserve_avail_buf_split(dev, vq,
1537 pkt_len, buf_vec, &num_buffers,
1538 avail_head, &nr_vec) < 0)) {
1539 VHOST_LOG_DATA(DEBUG,
1540 "(%d) failed to get enough desc from vring\n",
1542 vq->shadow_used_idx -= num_buffers;
1546 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1547 dev->vid, vq->last_avail_idx,
1548 vq->last_avail_idx + num_buffers);
1550 if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers,
1551 &src_iovec[iovec_idx], &dst_iovec[iovec_idx],
1552 &it_pool[it_idx], &it_pool[it_idx + 1]) < 0) {
1553 vq->shadow_used_idx -= num_buffers;
1557 async_fill_desc(&tdes[pkt_burst_idx++], &it_pool[it_idx],
1558 &it_pool[it_idx + 1]);
1560 slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
1561 pkts_info[slot_idx].descs = num_buffers;
1562 pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1564 iovec_idx += it_pool[it_idx].nr_segs;
1565 segs_await += it_pool[it_idx].nr_segs;
1568 vq->last_avail_idx += num_buffers;
1571 * Conditions to trigger the async device transfer:
1572 * - the number of buffered packets reaches the transfer threshold
1573 * - fewer than BUF_VECTOR_MAX async iovec slots remain free
1575 if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
1576 ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
1578 n_xfer = async->ops.transfer_data(dev->vid,
1579 queue_id, tdes, 0, pkt_burst_idx);
1580 if (likely(n_xfer >= 0)) {
1584 "(%d) %s: failed to transfer data for queue id %d.\n",
1585 dev->vid, __func__, queue_id);
1593 if (unlikely(n_pkts < pkt_burst_idx)) {
1595 * record the number of error packets here; the actual error
1596 * processing happens when the application polls for completions
1599 pkt_err = pkt_burst_idx - n_pkts;
1609 if (pkt_burst_idx) {
1610 n_xfer = async->ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
1611 if (likely(n_xfer >= 0)) {
1614 VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
1615 dev->vid, __func__, queue_id);
1619 if (unlikely(n_pkts < pkt_burst_idx))
1620 pkt_err = pkt_burst_idx - n_pkts;
1623 if (unlikely(pkt_err)) {
1624 uint16_t num_descs = 0;
1626 /* update number of completed packets */
1629 /* calculate the sum of descriptors to revert */
1630 while (pkt_err-- > 0) {
1631 num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
1635 /* recover shadow used ring and available ring */
1636 vq->shadow_used_idx -= num_descs;
1637 vq->last_avail_idx -= num_descs;
1640 /* keep used descriptors */
1641 if (likely(vq->shadow_used_idx)) {
1642 uint16_t to = async->desc_idx_split & (vq->size - 1);
1644 store_dma_desc_info_split(vq->shadow_used_split,
1645 async->descs_split, vq->size, 0, to,
1646 vq->shadow_used_idx);
1648 async->desc_idx_split += vq->shadow_used_idx;
1649 async->pkts_idx += pkt_idx;
1650 async->pkts_inflight_n += pkt_idx;
1651 vq->shadow_used_idx = 0;
1657 static __rte_always_inline void
1658 vhost_update_used_packed(struct vhost_virtqueue *vq,
1659 struct vring_used_elem_packed *shadow_ring,
1663 uint16_t used_idx = vq->last_used_idx;
1664 uint16_t head_idx = vq->last_used_idx;
1665 uint16_t head_flags = 0;
1670 /* Split loop in two to save memory barriers */
1671 for (i = 0; i < count; i++) {
1672 vq->desc_packed[used_idx].id = shadow_ring[i].id;
1673 vq->desc_packed[used_idx].len = shadow_ring[i].len;
1675 used_idx += shadow_ring[i].count;
1676 if (used_idx >= vq->size)
1677 used_idx -= vq->size;
1680 /* The ordering for storing desc flags needs to be enforced. */
1681 rte_atomic_thread_fence(__ATOMIC_RELEASE);
1683 for (i = 0; i < count; i++) {
1686 if (vq->shadow_used_packed[i].len)
1687 flags = VRING_DESC_F_WRITE;
1691 if (vq->used_wrap_counter) {
1692 flags |= VRING_DESC_F_USED;
1693 flags |= VRING_DESC_F_AVAIL;
1695 flags &= ~VRING_DESC_F_USED;
1696 flags &= ~VRING_DESC_F_AVAIL;
1700 vq->desc_packed[vq->last_used_idx].flags = flags;
1702 head_idx = vq->last_used_idx;
1706 vq_inc_last_used_packed(vq, shadow_ring[i].count);
1709 vq->desc_packed[head_idx].flags = head_flags;
1712 static __rte_always_inline int
1713 vhost_enqueue_async_packed(struct virtio_net *dev,
1714 struct vhost_virtqueue *vq,
1715 struct rte_mbuf *pkt,
1716 struct buf_vector *buf_vec,
1718 uint16_t *nr_buffers,
1719 struct iovec *src_iovec, struct iovec *dst_iovec,
1720 struct rte_vhost_iov_iter *src_it,
1721 struct rte_vhost_iov_iter *dst_it)
1723 uint16_t nr_vec = 0;
1724 uint16_t avail_idx = vq->last_avail_idx;
1725 uint16_t max_tries, tries = 0;
1726 uint16_t buf_id = 0;
1728 uint16_t desc_count = 0;
1729 uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
1730 uint32_t buffer_len[vq->size];
1731 uint16_t buffer_buf_id[vq->size];
1732 uint16_t buffer_desc_count[vq->size];
1734 if (rxvq_is_mergeable(dev))
1735 max_tries = vq->size - 1;
1741 * if we tried all available ring items and still
1742 * can't get enough buffers, something abnormal happened
1745 if (unlikely(++tries > max_tries))
1748 if (unlikely(fill_vec_buf_packed(dev, vq, avail_idx, &desc_count, buf_vec, &nr_vec,
1749 &buf_id, &len, VHOST_ACCESS_RW) < 0))
1752 len = RTE_MIN(len, size);
1755 buffer_len[*nr_buffers] = len;
1756 buffer_buf_id[*nr_buffers] = buf_id;
1757 buffer_desc_count[*nr_buffers] = desc_count;
1759 *nr_descs += desc_count;
1760 avail_idx += desc_count;
1761 if (avail_idx >= vq->size)
1762 avail_idx -= vq->size;
1765 if (unlikely(async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec,
1766 *nr_buffers, src_iovec, dst_iovec,
1767 src_it, dst_it) < 0))
1770 vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
1775 static __rte_always_inline int16_t
1776 virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1777 struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers,
1778 struct iovec *src_iovec, struct iovec *dst_iovec,
1779 struct rte_vhost_iov_iter *src_it, struct rte_vhost_iov_iter *dst_it)
1781 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1783 if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec, nr_descs, nr_buffers,
1784 src_iovec, dst_iovec,
1785 src_it, dst_it) < 0)) {
1786 VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n", dev->vid);
1790 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1791 dev->vid, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
1796 static __rte_always_inline void
1797 dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
1798 uint32_t nr_err, uint32_t *pkt_idx)
1800 uint16_t descs_err = 0;
1801 uint16_t buffers_err = 0;
1802 struct async_inflight_info *pkts_info = vq->async->pkts_info;
1805 /* calculate the sum of buffers and descs of DMA-error packets. */
1806 while (nr_err-- > 0) {
1807 descs_err += pkts_info[slot_idx % vq->size].descs;
1808 buffers_err += pkts_info[slot_idx % vq->size].nr_buffers;
1812 if (vq->last_avail_idx >= descs_err) {
1813 vq->last_avail_idx -= descs_err;
1815 vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
1816 vq->avail_wrap_counter ^= 1;
1819 vq->shadow_used_idx -= buffers_err;
1822 static __rte_noinline uint32_t
1823 virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
1824 struct vhost_virtqueue *vq, uint16_t queue_id,
1825 struct rte_mbuf **pkts, uint32_t count)
1827 uint32_t pkt_idx = 0, pkt_burst_idx = 0;
1828 uint32_t remained = count;
1830 uint16_t num_buffers;
1833 struct vhost_async *async = vq->async;
1834 struct rte_vhost_iov_iter *it_pool = async->it_pool;
1835 struct iovec *vec_pool = async->vec_pool;
1836 struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
1837 struct iovec *src_iovec = vec_pool;
1838 struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
1839 struct async_inflight_info *pkts_info = async->pkts_info;
1840 uint32_t n_pkts = 0, pkt_err = 0;
1841 uint16_t slot_idx = 0;
1842 uint16_t segs_await = 0;
1843 uint16_t iovec_idx = 0, it_idx = 0;
1846 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1850 if (unlikely(virtio_dev_rx_async_packed(dev, vq, pkts[pkt_idx],
1851 &num_descs, &num_buffers,
1852 &src_iovec[iovec_idx], &dst_iovec[iovec_idx],
1853 &it_pool[it_idx], &it_pool[it_idx + 1]) < 0))
1856 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
1858 async_fill_desc(&tdes[pkt_burst_idx++], &it_pool[it_idx],
1859 &it_pool[it_idx + 1]);
1860 pkts_info[slot_idx].descs = num_descs;
1861 pkts_info[slot_idx].nr_buffers = num_buffers;
1862 pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1863 iovec_idx += it_pool[it_idx].nr_segs;
1864 segs_await += it_pool[it_idx].nr_segs;
1869 vq_inc_last_avail_packed(vq, num_descs);
1872 * Conditions to trigger the async device transfer:
1873 * - the number of buffered packets reaches the transfer threshold
1874 * - fewer than BUF_VECTOR_MAX async iovec slots remain free
1876 if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
1877 ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
1878 n_xfer = async->ops.transfer_data(dev->vid,
1879 queue_id, tdes, 0, pkt_burst_idx);
1880 if (likely(n_xfer >= 0)) {
1884 "(%d) %s: failed to transfer data for queue id %d.\n",
1885 dev->vid, __func__, queue_id);
1893 if (unlikely(n_pkts < pkt_burst_idx)) {
1895 * record the number of error packets here; the actual error
1896 * processing happens when the application polls for completions
1899 pkt_err = pkt_burst_idx - n_pkts;
1906 } while (pkt_idx < count);
1908 if (pkt_burst_idx) {
1909 n_xfer = async->ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
1910 if (likely(n_xfer >= 0)) {
1913 VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
1914 dev->vid, __func__, queue_id);
1918 if (unlikely(n_pkts < pkt_burst_idx))
1919 pkt_err = pkt_burst_idx - n_pkts;
1922 if (unlikely(pkt_err))
1923 dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
1925 if (likely(vq->shadow_used_idx)) {
1926 /* keep used descriptors. */
1927 store_dma_desc_info_packed(vq->shadow_used_packed, async->buffers_packed,
1928 vq->size, 0, async->buffer_idx_packed,
1929 vq->shadow_used_idx);
1931 async->buffer_idx_packed += vq->shadow_used_idx;
1932 if (async->buffer_idx_packed >= vq->size)
1933 async->buffer_idx_packed -= vq->size;
1935 async->pkts_idx += pkt_idx;
1936 if (async->pkts_idx >= vq->size)
1937 async->pkts_idx -= vq->size;
1939 vq->shadow_used_idx = 0;
1940 async->pkts_inflight_n += pkt_idx;
1946 static __rte_always_inline void
1947 write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
1949 struct vhost_async *async = vq->async;
1950 uint16_t nr_left = n_descs;
1955 from = async->last_desc_idx_split & (vq->size - 1);
1956 nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
1957 to = vq->last_used_idx & (vq->size - 1);
1959 if (to + nr_copy <= vq->size) {
1960 rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
1961 nr_copy * sizeof(struct vring_used_elem));
1963 uint16_t size = vq->size - to;
1965 rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
1966 size * sizeof(struct vring_used_elem));
1967 rte_memcpy(&vq->used->ring[0], &async->descs_split[from + size],
1968 (nr_copy - size) * sizeof(struct vring_used_elem));
1971 async->last_desc_idx_split += nr_copy;
1972 vq->last_used_idx += nr_copy;
1974 } while (nr_left > 0);
1977 static __rte_always_inline void
1978 write_back_completed_descs_packed(struct vhost_virtqueue *vq,
1981 struct vhost_async *async = vq->async;
1982 uint16_t nr_left = n_buffers;
1986 from = async->last_buffer_idx_packed;
1987 to = (from + nr_left) % vq->size;
1989 vhost_update_used_packed(vq, async->buffers_packed + from, to - from);
1990 async->last_buffer_idx_packed += nr_left;
1993 vhost_update_used_packed(vq, async->buffers_packed + from,
1995 async->last_buffer_idx_packed = 0;
1996 nr_left -= vq->size - from;
1998 } while (nr_left > 0);
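/*
 * Collect completions from the async channel: return the mbufs of
 * finished transfers to the caller and write the corresponding
 * descriptors back to the used ring.
 */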
2001 static __rte_always_inline uint16_t
2002 vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
2003 struct rte_mbuf **pkts, uint16_t count)
2005 struct vhost_virtqueue *vq;
2006 struct vhost_async *async;
2007 struct async_inflight_info *pkts_info;
2009 uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
2010 uint16_t start_idx, pkts_idx, vq_size;
2013 vq = dev->virtqueue[queue_id];
2015 pkts_idx = async->pkts_idx % vq->size;
2016 pkts_info = async->pkts_info;
2018 start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
2019 vq_size, async->pkts_inflight_n);
2021 if (count > async->last_pkts_n) {
2022 n_cpl = async->ops.check_completed_copies(dev->vid,
2023 queue_id, 0, count - async->last_pkts_n);
2024 if (likely(n_cpl >= 0)) {
2028 "(%d) %s: failed to check completed copies for queue id %d.\n",
2029 dev->vid, __func__, queue_id);
2034 n_pkts_cpl += async->last_pkts_n;
2035 n_pkts_put = RTE_MIN(n_pkts_cpl, count);
2036 if (unlikely(n_pkts_put == 0)) {
2037 async->last_pkts_n = n_pkts_cpl;
2041 if (vq_is_packed(dev)) {
2042 for (i = 0; i < n_pkts_put; i++) {
2043 from = (start_idx + i) % vq_size;
2044 n_buffers += pkts_info[from].nr_buffers;
2045 pkts[i] = pkts_info[from].mbuf;
2048 for (i = 0; i < n_pkts_put; i++) {
2049 from = (start_idx + i) & (vq_size - 1);
2050 n_descs += pkts_info[from].descs;
2051 pkts[i] = pkts_info[from].mbuf;
2054 async->last_pkts_n = n_pkts_cpl - n_pkts_put;
2055 async->pkts_inflight_n -= n_pkts_put;
2057 if (likely(vq->enabled && vq->access_ok)) {
2058 if (vq_is_packed(dev)) {
2059 write_back_completed_descs_packed(vq, n_buffers);
2061 vhost_vring_call_packed(dev, vq);
2063 write_back_completed_descs_split(vq, n_descs);
2065 __atomic_add_fetch(&vq->used->idx, n_descs,
2067 vhost_vring_call_split(dev, vq);
2070 if (vq_is_packed(dev)) {
2071 async->last_buffer_idx_packed += n_buffers;
2072 if (async->last_buffer_idx_packed >= vq->size)
2073 async->last_buffer_idx_packed -= vq->size;
2075 async->last_desc_idx_split += n_descs;
2083 rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
2084 struct rte_mbuf **pkts, uint16_t count)
2086 struct virtio_net *dev = get_device(vid);
2087 struct vhost_virtqueue *vq;
2088 uint16_t n_pkts_cpl = 0;
2093 VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2094 if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2095 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
2096 dev->vid, __func__, queue_id);
2100 vq = dev->virtqueue[queue_id];
2102 if (unlikely(!vq->async)) {
2103 VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
2104 dev->vid, __func__, queue_id);
2108 rte_spinlock_lock(&vq->access_lock);
2110 n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
2112 rte_spinlock_unlock(&vq->access_lock);
2118 rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
2119 struct rte_mbuf **pkts, uint16_t count)
2121 struct virtio_net *dev = get_device(vid);
2122 struct vhost_virtqueue *vq;
2123 uint16_t n_pkts_cpl = 0;
2128 VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2129 if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2130 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
2131 dev->vid, __func__, queue_id);
2135 vq = dev->virtqueue[queue_id];
2137 if (unlikely(!vq->async)) {
2138 VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
2139 dev->vid, __func__, queue_id);
2143 n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
2148 static __rte_always_inline uint32_t
2149 virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
2150 struct rte_mbuf **pkts, uint32_t count)
2152 struct vhost_virtqueue *vq;
2155 VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2156 if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
2157 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
2158 dev->vid, __func__, queue_id);
2162 vq = dev->virtqueue[queue_id];
2164 rte_spinlock_lock(&vq->access_lock);
2166 if (unlikely(!vq->enabled || !vq->async))
2167 goto out_access_unlock;
2169 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2170 vhost_user_iotlb_rd_lock(vq);
2172 if (unlikely(!vq->access_ok))
2173 if (unlikely(vring_translate(dev, vq) < 0))
2176 count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
2180 if (vq_is_packed(dev))
2181 nb_tx = virtio_dev_rx_async_submit_packed(dev, vq, queue_id,
2184 nb_tx = virtio_dev_rx_async_submit_split(dev, vq, queue_id,
2188 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2189 vhost_user_iotlb_rd_unlock(vq);
2192 rte_spinlock_unlock(&vq->access_lock);
2198 rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
2199 struct rte_mbuf **pkts, uint16_t count)
2201 struct virtio_net *dev = get_device(vid);
2206 if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2208 "(%d) %s: built-in vhost net backend is disabled.\n",
2209 dev->vid, __func__);
2213 return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
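/*
 * Minimal usage sketch for the asynchronous enqueue API (assumption: an
 * async copy channel was registered on the queue beforehand, e.g. with
 * rte_vhost_async_channel_register()):
 *
 *	uint16_t n = rte_vhost_submit_enqueue_burst(vid, 0, mbufs, nb_pkts);
 *	// submitted mbufs stay in flight and must not be freed yet;
 *	// reclaim them once the DMA copies have completed:
 *	struct rte_mbuf *done[32];
 *	uint16_t nr_done = rte_vhost_poll_enqueue_completed(vid, 0, done, 32);
 *	while (nr_done--)
 *		rte_pktmbuf_free(done[nr_done]);
 */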
2217 virtio_net_with_host_offload(struct virtio_net *dev)
2220 ((1ULL << VIRTIO_NET_F_CSUM) |
2221 (1ULL << VIRTIO_NET_F_HOST_ECN) |
2222 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2223 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
2224 (1ULL << VIRTIO_NET_F_HOST_UFO)))
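/*
 * Parse the Ethernet (and optional VLAN), L3 and L4 headers to set
 * m->l2_len/l3_len and the matching RTE_MBUF_F_TX_* flags, and report
 * the L4 protocol; fails when a header does not fit in the first mbuf
 * segment.
 */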
2231 parse_headers(struct rte_mbuf *m, uint8_t *l4_proto)
2233 struct rte_ipv4_hdr *ipv4_hdr;
2234 struct rte_ipv6_hdr *ipv6_hdr;
2235 struct rte_ether_hdr *eth_hdr;
2237 uint16_t data_len = rte_pktmbuf_data_len(m);
2239 if (data_len < sizeof(struct rte_ether_hdr))
2242 eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
2244 m->l2_len = sizeof(struct rte_ether_hdr);
2245 ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
2247 if (ethertype == RTE_ETHER_TYPE_VLAN) {
2248 if (data_len < sizeof(struct rte_ether_hdr) +
2249 sizeof(struct rte_vlan_hdr))
2252 struct rte_vlan_hdr *vlan_hdr =
2253 (struct rte_vlan_hdr *)(eth_hdr + 1);
2255 m->l2_len += sizeof(struct rte_vlan_hdr);
2256 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
2259 switch (ethertype) {
2260 case RTE_ETHER_TYPE_IPV4:
2261 if (data_len < m->l2_len + sizeof(struct rte_ipv4_hdr))
2263 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2265 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
2266 if (data_len < m->l2_len + m->l3_len)
2268 m->ol_flags |= RTE_MBUF_F_TX_IPV4;
2269 *l4_proto = ipv4_hdr->next_proto_id;
2271 case RTE_ETHER_TYPE_IPV6:
2272 if (data_len < m->l2_len + sizeof(struct rte_ipv6_hdr))
2274 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
2276 m->l3_len = sizeof(struct rte_ipv6_hdr);
2277 m->ol_flags |= RTE_MBUF_F_TX_IPV6;
2278 *l4_proto = ipv6_hdr->proto;
2281 /* a valid L3 header is needed for further L4 parsing */
2285 /* both CSUM and GSO need a valid L4 header */
2286 switch (*l4_proto) {
2288 if (data_len < m->l2_len + m->l3_len +
2289 sizeof(struct rte_tcp_hdr))
2293 if (data_len < m->l2_len + m->l3_len +
2294 sizeof(struct rte_udp_hdr))
2298 if (data_len < m->l2_len + m->l3_len +
2299 sizeof(struct rte_sctp_hdr))
2315 static __rte_always_inline void
2316 vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
2318 uint8_t l4_proto = 0;
2319 struct rte_tcp_hdr *tcp_hdr = NULL;
2321 uint16_t data_len = rte_pktmbuf_data_len(m);
2323 if (parse_headers(m, &l4_proto) < 0)
2326 if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2327 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
2328 switch (hdr->csum_offset) {
2329 case (offsetof(struct rte_tcp_hdr, cksum)):
2330 if (l4_proto != IPPROTO_TCP)
2332 m->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
2334 case (offsetof(struct rte_udp_hdr, dgram_cksum)):
2335 if (l4_proto != IPPROTO_UDP)
2337 m->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
2339 case (offsetof(struct rte_sctp_hdr, cksum)):
2340 if (l4_proto != IPPROTO_SCTP)
2342 m->ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
2352 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2353 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2354 case VIRTIO_NET_HDR_GSO_TCPV4:
2355 case VIRTIO_NET_HDR_GSO_TCPV6:
2356 if (l4_proto != IPPROTO_TCP)
2358 tcp_hdr = rte_pktmbuf_mtod_offset(m,
2359 struct rte_tcp_hdr *,
2360 m->l2_len + m->l3_len);
2361 tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
2362 if (data_len < m->l2_len + m->l3_len + tcp_len)
2364 m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
2365 m->tso_segsz = hdr->gso_size;
2366 m->l4_len = tcp_len;
2368 case VIRTIO_NET_HDR_GSO_UDP:
2369 if (l4_proto != IPPROTO_UDP)
2371 m->ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
2372 m->tso_segsz = hdr->gso_size;
2373 m->l4_len = sizeof(struct rte_udp_hdr);
2376 VHOST_LOG_DATA(WARNING,
2377 "unsupported gso type %u.\n", hdr->gso_type);
2389 static __rte_always_inline void
2390 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m,
2391 bool legacy_ol_flags)
2393 struct rte_net_hdr_lens hdr_lens;
2394 int l4_supported = 0;
2397 if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
2400 if (legacy_ol_flags) {
2401 vhost_dequeue_offload_legacy(hdr, m);
2405 m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
2407 ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
2408 m->packet_type = ptype;
2409 if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
2410 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
2411 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
2414 /* According to Virtio 1.1 spec, the device only needs to look at
2415 * VIRTIO_NET_HDR_F_NEEDS_CSUM in the packet transmission path.
2416 * This differs from the incoming packet processing path, where the
2417 * driver could rely on the VIRTIO_NET_HDR_F_DATA_VALID flag set by the device.
2420 * 5.1.6.2.1 Driver Requirements: Packet Transmission
2421 * The driver MUST NOT set the VIRTIO_NET_HDR_F_DATA_VALID and
2422 * VIRTIO_NET_HDR_F_RSC_INFO bits in flags.
2424 * 5.1.6.2.2 Device Requirements: Packet Transmission
2425 * The device MUST ignore flag bits that it does not recognize.
2427 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2430 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
2431 if (hdr->csum_start <= hdrlen && l4_supported != 0) {
2432 m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
2434 /* Unknown proto or tunnel, do sw cksum. We can assume
2435 * the cksum field is in the first segment since the
2436 * buffers we provided to the host are large enough.
2437 * In case of SCTP, this will be wrong since it's a CRC
2438 * but there's nothing we can do.
2440 uint16_t csum = 0, off;
2442 if (rte_raw_cksum_mbuf(m, hdr->csum_start,
2443 rte_pktmbuf_pkt_len(m) - hdr->csum_start, &csum) < 0)
2445 if (likely(csum != 0xffff))
2447 off = hdr->csum_offset + hdr->csum_start;
2448 if (rte_pktmbuf_data_len(m) >= off + 1)
2449 *rte_pktmbuf_mtod_offset(m, uint16_t *, off) = csum;
2453 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2454 if (hdr->gso_size == 0)
2457 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2458 case VIRTIO_NET_HDR_GSO_TCPV4:
2459 case VIRTIO_NET_HDR_GSO_TCPV6:
2460 if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_TCP)
2462 m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
2463 m->tso_segsz = hdr->gso_size;
2465 case VIRTIO_NET_HDR_GSO_UDP:
2466 if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_UDP)
2468 m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
2469 m->tso_segsz = hdr->gso_size;
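/* Slow path: gather the virtio-net header from the descriptor chain when
 * it is not stored in a contiguous virtual area.
 */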
2477 static __rte_noinline void
2478 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
2479 struct buf_vector *buf_vec)
2482 uint64_t remain = sizeof(struct virtio_net_hdr);
2484 uint64_t dst = (uint64_t)(uintptr_t)hdr;
2487 len = RTE_MIN(remain, buf_vec->buf_len);
2488 src = buf_vec->buf_addr;
2489 rte_memcpy((void *)(uintptr_t)dst,
2490 (void *)(uintptr_t)src, len);
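/* Copy one descriptor chain into an mbuf, chaining extra mbufs from
 * mbuf_pool when the payload does not fit, and apply the virtio-net
 * header offload conversion at the end.
 */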
2498 static __rte_always_inline int
2499 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
2500 struct buf_vector *buf_vec, uint16_t nr_vec,
2501 struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
2502 bool legacy_ol_flags)
2504 uint32_t buf_avail, buf_offset;
2505 uint64_t buf_addr, buf_len;
2506 uint32_t mbuf_avail, mbuf_offset;
2508 struct rte_mbuf *cur = m, *prev = m;
2509 struct virtio_net_hdr tmp_hdr;
2510 struct virtio_net_hdr *hdr = NULL;
2511 /* A counter to avoid a dead loop in the descriptor chain */
2512 uint16_t vec_idx = 0;
2513 struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
2516 buf_addr = buf_vec[vec_idx].buf_addr;
2517 buf_len = buf_vec[vec_idx].buf_len;
2519 if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
2524 if (virtio_net_with_host_offload(dev)) {
2525 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
2527 * No luck, the virtio-net header doesn't fit
2528 * in a contiguous virtual area.
2530 copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
2533 hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
2538 * A virtio driver normally uses at least 2 desc buffers
2539 * for Tx: the first for storing the header, and others
2540 * for storing the data.
2542 if (unlikely(buf_len < dev->vhost_hlen)) {
2543 buf_offset = dev->vhost_hlen - buf_len;
2545 buf_addr = buf_vec[vec_idx].buf_addr;
2546 buf_len = buf_vec[vec_idx].buf_len;
2547 buf_avail = buf_len - buf_offset;
2548 } else if (buf_len == dev->vhost_hlen) {
2549 if (unlikely(++vec_idx >= nr_vec))
2551 buf_addr = buf_vec[vec_idx].buf_addr;
2552 buf_len = buf_vec[vec_idx].buf_len;
2555 buf_avail = buf_len;
2557 buf_offset = dev->vhost_hlen;
2558 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
2562 (uintptr_t)(buf_addr + buf_offset),
2563 (uint32_t)buf_avail, 0);
2566 mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
2568 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
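/* Copy immediately when the copy is large, the batch array is full, or
 * this is the first mbuf of a packet carrying a virtio-net header:
 * vhost_dequeue_offload() parses the headers in the first mbuf before
 * the deferred batch copies are flushed by the caller.
 */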
2570 if (likely(cpy_len > MAX_BATCH_LEN ||
2571 vq->batch_copy_nb_elems >= vq->size ||
2572 (hdr && cur == m))) {
2573 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
2575 (void *)((uintptr_t)(buf_addr +
2576 buf_offset)), cpy_len);
2578 batch_copy[vq->batch_copy_nb_elems].dst =
2579 rte_pktmbuf_mtod_offset(cur, void *,
2581 batch_copy[vq->batch_copy_nb_elems].src =
2582 (void *)((uintptr_t)(buf_addr + buf_offset));
2583 batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
2584 vq->batch_copy_nb_elems++;
2587 mbuf_avail -= cpy_len;
2588 mbuf_offset += cpy_len;
2589 buf_avail -= cpy_len;
2590 buf_offset += cpy_len;
2592 /* This buf has reached its end, get the next one */
2593 if (buf_avail == 0) {
2594 if (++vec_idx >= nr_vec)
2597 buf_addr = buf_vec[vec_idx].buf_addr;
2598 buf_len = buf_vec[vec_idx].buf_len;
2601 buf_avail = buf_len;
2603 PRINT_PACKET(dev, (uintptr_t)buf_addr,
2604 (uint32_t)buf_avail, 0);
2608 * This mbuf has reached its end, get a new one
2609 * to hold more data.
2611 if (mbuf_avail == 0) {
2612 cur = rte_pktmbuf_alloc(mbuf_pool);
2613 if (unlikely(cur == NULL)) {
2614 VHOST_LOG_DATA(ERR, "Failed to "
2615 "allocate memory for mbuf.\n");
2621 prev->data_len = mbuf_offset;
2623 m->pkt_len += mbuf_offset;
2627 mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
2631 prev->data_len = mbuf_offset;
2632 m->pkt_len += mbuf_offset;
2635 vhost_dequeue_offload(hdr, m, legacy_ol_flags);
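/* External buffer helpers: virtio_dev_extbuf_free() releases a buffer
 * attached with rte_pktmbuf_attach_extbuf(), virtio_dev_extbuf_alloc()
 * allocates one large enough for 'size' bytes plus headroom and shared
 * info, and attaches it to the given mbuf.
 */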
2643 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
2649 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
2651 struct rte_mbuf_ext_shared_info *shinfo = NULL;
2652 uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
2657 total_len += sizeof(*shinfo) + sizeof(uintptr_t);
2658 total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
2660 if (unlikely(total_len > UINT16_MAX))
2663 buf_len = total_len;
2664 buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
2665 if (unlikely(buf == NULL))
2668 /* Initialize shinfo */
2669 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
2670 virtio_dev_extbuf_free, buf);
2671 if (unlikely(shinfo == NULL)) {
2673 VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
2677 iova = rte_malloc_virt2iova(buf);
2678 rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
2679 rte_pktmbuf_reset_headroom(pkt);
2685 * Prepare a pktmbuf able to hold 'data_len' bytes, using its tailroom, an attached external buffer, or chained mbufs as the device configuration allows.
2687 static __rte_always_inline int
2688 virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
2691 if (rte_pktmbuf_tailroom(pkt) >= data_len)
2694 /* attach an external buffer if supported */
2695 if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
2698 /* check if chained buffers are allowed */
2699 if (!dev->linearbuf)
2707 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
2708 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
2709 bool legacy_ol_flags)
2712 uint16_t free_entries;
2713 uint16_t dropped = 0;
2714 static bool allocerr_warned;
2717 * The ordering between avail index and
2718 * desc reads needs to be enforced.
2720 free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
2722 if (free_entries == 0)
2725 rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
2727 VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2729 count = RTE_MIN(count, MAX_PKT_BURST);
2730 count = RTE_MIN(count, free_entries);
2731 VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
2734 if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
2737 for (i = 0; i < count; i++) {
2738 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2741 uint16_t nr_vec = 0;
2744 if (unlikely(fill_vec_buf_split(dev, vq,
2745 vq->last_avail_idx + i,
2747 &head_idx, &buf_len,
2748 VHOST_ACCESS_RO) < 0))
2751 update_shadow_used_ring_split(vq, head_idx, 0);
2753 err = virtio_dev_pktmbuf_prep(dev, pkts[i], buf_len);
2754 if (unlikely(err)) {
2756 * mbuf allocation fails for jumbo packets when external
2757 * buffer allocation is not allowed and a linear buffer
2758 * is required. Drop this packet.
2760 if (!allocerr_warned) {
2762 "Failed mbuf alloc of size %d from %s on %s.\n",
2763 buf_len, mbuf_pool->name, dev->ifname);
2764 allocerr_warned = true;
2771 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2772 mbuf_pool, legacy_ol_flags);
2773 if (unlikely(err)) {
2774 if (!allocerr_warned) {
2776 "Failed to copy desc to mbuf on %s.\n",
2778 allocerr_warned = true;
2787 rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1);
2789 vq->last_avail_idx += i;
2791 do_data_copy_dequeue(vq);
2792 if (unlikely(i < count))
2793 vq->shadow_used_idx = i;
2794 if (likely(vq->shadow_used_idx)) {
2795 flush_shadow_used_ring_split(dev, vq);
2796 vhost_vring_call_split(dev, vq);
2799 return (i - dropped);
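/* virtio_dev_tx_split() above implements the split ring dequeue path.
 * The two wrappers below fix the offload flag convention (legacy Tx-style
 * vs. Virtio-compliant Rx-style) once per burst.
 */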
2804 virtio_dev_tx_split_legacy(struct virtio_net *dev,
2805 struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
2806 struct rte_mbuf **pkts, uint16_t count)
2808 return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
2813 virtio_dev_tx_split_compliant(struct virtio_net *dev,
2814 struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
2815 struct rte_mbuf **pkts, uint16_t count)
2817 return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
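/* Check whether the next PACKED_BATCH_SIZE descriptors can be dequeued
 * as one batch: all of them must be available, single-descriptor buffers
 * that fit into the provided mbufs. On success desc_addrs[] and ids[]
 * are filled in.
 */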
2820 static __rte_always_inline int
2821 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
2822 struct vhost_virtqueue *vq,
2823 struct rte_mbuf **pkts,
2825 uintptr_t *desc_addrs,
2828 bool wrap = vq->avail_wrap_counter;
2829 struct vring_packed_desc *descs = vq->desc_packed;
2830 uint64_t lens[PACKED_BATCH_SIZE];
2831 uint64_t buf_lens[PACKED_BATCH_SIZE];
2832 uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2835 if (unlikely(avail_idx & PACKED_BATCH_MASK))
2837 if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
2840 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2841 flags = descs[avail_idx + i].flags;
2842 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
2843 (wrap == !!(flags & VRING_DESC_F_USED)) ||
2844 (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
2848 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
2850 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2851 lens[i] = descs[avail_idx + i].len;
2853 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2854 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
2855 descs[avail_idx + i].addr,
2856 &lens[i], VHOST_ACCESS_RW);
2859 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2860 if (unlikely(!desc_addrs[i]))
2862 if (unlikely((lens[i] != descs[avail_idx + i].len)))
2866 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2867 if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
2871 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2872 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
2874 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2875 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
2879 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2880 pkts[i]->pkt_len = lens[i] - buf_offset;
2881 pkts[i]->data_len = pkts[i]->pkt_len;
2882 ids[i] = descs[avail_idx + i].id;
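/* Batched dequeue from a packed virtqueue: copy the payloads, apply the
 * offload conversion and record the used descriptors in the shadow ring.
 */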
2891 static __rte_always_inline int
2892 virtio_dev_tx_batch_packed(struct virtio_net *dev,
2893 struct vhost_virtqueue *vq,
2894 struct rte_mbuf **pkts,
2895 bool legacy_ol_flags)
2897 uint16_t avail_idx = vq->last_avail_idx;
2898 uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2899 struct virtio_net_hdr *hdr;
2900 uintptr_t desc_addrs[PACKED_BATCH_SIZE];
2901 uint16_t ids[PACKED_BATCH_SIZE];
2904 if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
2908 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2909 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
2911 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2912 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
2913 (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
2916 if (virtio_net_with_host_offload(dev)) {
2917 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2918 hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
2919 vhost_dequeue_offload(hdr, pkts[i], legacy_ol_flags);
2923 if (virtio_net_is_inorder(dev))
2924 vhost_shadow_dequeue_batch_packed_inorder(vq,
2925 ids[PACKED_BATCH_SIZE - 1]);
2927 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
2929 vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
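/* Dequeue a single descriptor chain from a packed virtqueue into 'pkts',
 * reporting the head buffer id and the number of descriptors consumed
 * through the output parameters.
 */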
2934 static __rte_always_inline int
2935 vhost_dequeue_single_packed(struct virtio_net *dev,
2936 struct vhost_virtqueue *vq,
2937 struct rte_mempool *mbuf_pool,
2938 struct rte_mbuf *pkts,
2940 uint16_t *desc_count,
2941 bool legacy_ol_flags)
2943 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2945 uint16_t nr_vec = 0;
2947 static bool allocerr_warned;
2949 if (unlikely(fill_vec_buf_packed(dev, vq,
2950 vq->last_avail_idx, desc_count,
2953 VHOST_ACCESS_RO) < 0))
2956 if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
2957 if (!allocerr_warned) {
2959 "Failed mbuf alloc of size %d from %s on %s.\n",
2960 buf_len, mbuf_pool->name, dev->ifname);
2961 allocerr_warned = true;
2966 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
2967 mbuf_pool, legacy_ol_flags);
2968 if (unlikely(err)) {
2969 if (!allocerr_warned) {
2971 "Failed to copy desc to mbuf on %s.\n",
2973 allocerr_warned = true;
2981 static __rte_always_inline int
2982 virtio_dev_tx_single_packed(struct virtio_net *dev,
2983 struct vhost_virtqueue *vq,
2984 struct rte_mempool *mbuf_pool,
2985 struct rte_mbuf *pkts,
2986 bool legacy_ol_flags)
2989 uint16_t buf_id, desc_count = 0;
2992 ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2993 &desc_count, legacy_ol_flags);
2995 if (likely(desc_count > 0)) {
2996 if (virtio_net_is_inorder(dev))
2997 vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
3000 vhost_shadow_dequeue_single_packed(vq, buf_id,
3003 vq_inc_last_avail_packed(vq, desc_count);
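/* Main dequeue loop for packed virtqueues: try the batched path first,
 * fall back to single-descriptor dequeue, and free any mbufs left over
 * from the bulk allocation.
 */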
3011 virtio_dev_tx_packed(struct virtio_net *dev,
3012 struct vhost_virtqueue *__rte_restrict vq,
3013 struct rte_mempool *mbuf_pool,
3014 struct rte_mbuf **__rte_restrict pkts,
3016 bool legacy_ol_flags)
3018 uint32_t pkt_idx = 0;
3020 if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
3024 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
3026 if (count - pkt_idx >= PACKED_BATCH_SIZE) {
3027 if (!virtio_dev_tx_batch_packed(dev, vq,
3030 pkt_idx += PACKED_BATCH_SIZE;
3035 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
3040 } while (pkt_idx < count);
3042 if (pkt_idx != count)
3043 rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
3045 if (vq->shadow_used_idx) {
3046 do_data_copy_dequeue(vq);
3048 vhost_flush_dequeue_shadow_packed(dev, vq);
3049 vhost_vring_call_packed(dev, vq);
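/* As for the split ring, the wrappers below fix the offload flag
 * convention once per burst for the packed ring dequeue path.
 */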
3057 virtio_dev_tx_packed_legacy(struct virtio_net *dev,
3058 struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
3059 struct rte_mbuf **__rte_restrict pkts, uint32_t count)
3061 return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
3066 virtio_dev_tx_packed_compliant(struct virtio_net *dev,
3067 struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
3068 struct rte_mbuf **__rte_restrict pkts, uint32_t count)
3070 return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
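/* Public dequeue API: pull up to 'count' packets sent by the guest on
 * 'queue_id' into 'pkts', dispatching to the split or packed ring
 * implementation, and inject a RARP packet at the head of the burst
 * when broadcast_rarp is set.
 */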
3074 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
3075 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
3077 struct virtio_net *dev;
3078 struct rte_mbuf *rarp_mbuf = NULL;
3079 struct vhost_virtqueue *vq;
3080 int16_t success = 1;
3082 dev = get_device(vid);
3086 if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
3088 "(%d) %s: built-in vhost net backend is disabled.\n",
3089 dev->vid, __func__);
3093 if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
3095 "(%d) %s: invalid virtqueue idx %d.\n",
3096 dev->vid, __func__, queue_id);
3100 vq = dev->virtqueue[queue_id];
3102 if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
3105 if (unlikely(!vq->enabled)) {
3107 goto out_access_unlock;
3110 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
3111 vhost_user_iotlb_rd_lock(vq);
3113 if (unlikely(!vq->access_ok))
3114 if (unlikely(vring_translate(dev, vq) < 0)) {
3120 * Construct a RARP broadcast packet, and inject it into the "pkts"
3121 * array, so it looks like the guest actually sent such a packet.
3123 * Check user_send_rarp() for more information.
3125 * broadcast_rarp shares a cacheline in the virtio_net structure
3126 * with some fields that are accessed during enqueue, and
3127 * __atomic_compare_exchange_n performs a write as part of the compare
3128 * and exchange. This could result in false sharing between the enqueue and dequeue paths.
3131 * Prevent unnecessary false sharing by reading broadcast_rarp first
3132 * and only performing compare and exchange if the read indicates it
3133 * is likely to be set.
3135 if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
3136 __atomic_compare_exchange_n(&dev->broadcast_rarp,
3137 &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
3139 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
3140 if (rarp_mbuf == NULL) {
3141 VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
3146 * Inject it at the head of the "pkts" array, so that the switch's MAC
3147 * learning table gets updated first.
3149 pkts[0] = rarp_mbuf;
3154 if (vq_is_packed(dev)) {
3155 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
3156 count = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
3158 count = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
3160 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
3161 count = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
3163 count = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
3167 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
3168 vhost_user_iotlb_rd_unlock(vq);
3171 rte_spinlock_unlock(&vq->access_lock);
3173 if (unlikely(rarp_mbuf != NULL))