1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
7 #include <linux/virtio_net.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
13 #include <rte_vhost.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
24 #define MAX_PKT_BURST 32
26 #define MAX_BATCH_LEN 256
28 static __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
31 return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
35 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
37 return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
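/*
 * Illustrative note (added): virtio-net vrings come in RX/TX pairs, so even
 * indices are guest RX queues (used by the enqueue path) and odd indices are
 * guest TX queues (used by the dequeue path). For a device with 2 vrings:
 *
 *   is_valid_virt_queue_idx(0, 0, 2)   -> true  (enqueue to guest RX queue 0)
 *   is_valid_virt_queue_idx(1, 1, 2)   -> true  (dequeue from guest TX queue 1)
 *   is_valid_virt_queue_idx(1, 0, 2)   -> false (parity mismatch)
 */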
40 static __rte_always_inline void
41 do_flush_shadow_used_ring_split(struct virtio_net *dev,
42 struct vhost_virtqueue *vq,
43 uint16_t to, uint16_t from, uint16_t size)
45 rte_memcpy(&vq->used->ring[to],
46 &vq->shadow_used_split[from],
47 size * sizeof(struct vring_used_elem));
48 vhost_log_cache_used_vring(dev, vq,
49 offsetof(struct vring_used, ring[to]),
50 size * sizeof(struct vring_used_elem));
53 static __rte_always_inline void
54 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
56 uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
58 if (used_idx + vq->shadow_used_idx <= vq->size) {
59 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
64 /* update the used ring interval [used_idx, vq->size) */
65 size = vq->size - used_idx;
66 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
68 /* update the wrapped used ring interval [0, shadow_used_idx - size) */
69 do_flush_shadow_used_ring_split(dev, vq, 0, size,
70 vq->shadow_used_idx - size);
72 vq->last_used_idx += vq->shadow_used_idx;
76 vhost_log_cache_sync(dev, vq);
78 *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
79 vq->shadow_used_idx = 0;
80 vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
81 sizeof(vq->used->idx));
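/*
 * Worked example (values assumed): with vq->size = 256, last_used_idx = 250
 * and shadow_used_idx = 10, the first branch does not apply (250 + 10 > 256),
 * so the flush is done in two chunks:
 *
 *   do_flush_shadow_used_ring_split(dev, vq, 250, 0, 6);   ring slots 250..255
 *   do_flush_shadow_used_ring_split(dev, vq, 0, 6, 4);     ring slots 0..3
 *
 * i.e. shadow entries [0..5] land at the ring tail and [6..9] wrap to the
 * ring head, before used->idx is advanced by 10 in one shot.
 */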
84 static __rte_always_inline void
85 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
86 uint16_t desc_idx, uint32_t len)
88 uint16_t i = vq->shadow_used_idx++;
90 vq->shadow_used_split[i].id = desc_idx;
91 vq->shadow_used_split[i].len = len;
94 static __rte_always_inline void
95 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
96 struct vhost_virtqueue *vq)
99 uint16_t used_idx = vq->last_used_idx;
100 uint16_t head_idx = vq->last_used_idx;
101 uint16_t head_flags = 0;
103 /* Split loop in two to save memory barriers */
104 for (i = 0; i < vq->shadow_used_idx; i++) {
105 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
106 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
108 used_idx += vq->shadow_used_packed[i].count;
109 if (used_idx >= vq->size)
110 used_idx -= vq->size;
115 for (i = 0; i < vq->shadow_used_idx; i++) {
118 if (vq->shadow_used_packed[i].len)
119 flags = VRING_DESC_F_WRITE;
123 if (vq->used_wrap_counter) {
124 flags |= VRING_DESC_F_USED;
125 flags |= VRING_DESC_F_AVAIL;
127 flags &= ~VRING_DESC_F_USED;
128 flags &= ~VRING_DESC_F_AVAIL;
132 vq->desc_packed[vq->last_used_idx].flags = flags;
134 vhost_log_cache_used_vring(dev, vq,
136 sizeof(struct vring_packed_desc),
137 sizeof(struct vring_packed_desc));
139 head_idx = vq->last_used_idx;
143 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
146 vq->desc_packed[head_idx].flags = head_flags;
148 vhost_log_cache_used_vring(dev, vq,
150 sizeof(struct vring_packed_desc),
151 sizeof(struct vring_packed_desc));
153 vq->shadow_used_idx = 0;
154 vhost_log_cache_sync(dev, vq);
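/*
 * Background note (added, based on the packed-ring spec): a packed-queue
 * descriptor is marked "used" by making both VRING_DESC_F_AVAIL and
 * VRING_DESC_F_USED match the device's used wrap counter, e.g. with
 * used_wrap_counter == 1:
 *
 *   flags  = vq->shadow_used_packed[i].len ? VRING_DESC_F_WRITE : 0;
 *   flags |= VRING_DESC_F_AVAIL | VRING_DESC_F_USED;
 *
 * The head descriptor's flags are written last so the guest never observes
 * a partially updated chain.
 */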
157 static __rte_always_inline void
158 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
159 struct vhost_virtqueue *vq,
166 flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
168 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
169 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
170 vq->desc_packed[vq->last_used_idx + i].len = lens[i];
175 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
176 vq->desc_packed[vq->last_used_idx + i].flags = flags;
178 vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
179 sizeof(struct vring_packed_desc),
180 sizeof(struct vring_packed_desc) *
182 vhost_log_cache_sync(dev, vq);
184 vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
187 static __rte_always_inline void
188 flush_shadow_used_ring_packed(struct virtio_net *dev,
189 struct vhost_virtqueue *vq)
192 uint16_t used_idx = vq->last_used_idx;
193 uint16_t head_idx = vq->last_used_idx;
194 uint16_t head_flags = 0;
196 /* Split loop in two to save memory barriers */
197 for (i = 0; i < vq->shadow_used_idx; i++) {
198 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
199 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
201 used_idx += vq->shadow_used_packed[i].count;
202 if (used_idx >= vq->size)
203 used_idx -= vq->size;
206 for (i = 0; i < vq->shadow_used_idx; i++) {
209 if (vq->shadow_used_packed[i].len)
210 flags = VRING_DESC_F_WRITE;
214 if (vq->used_wrap_counter) {
215 flags |= VRING_DESC_F_USED;
216 flags |= VRING_DESC_F_AVAIL;
218 flags &= ~VRING_DESC_F_USED;
219 flags &= ~VRING_DESC_F_AVAIL;
223 vq->desc_packed[vq->last_used_idx].flags = flags;
225 vhost_log_cache_used_vring(dev, vq,
227 sizeof(struct vring_packed_desc),
228 sizeof(struct vring_packed_desc));
230 head_idx = vq->last_used_idx;
234 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
237 __atomic_store_n(&vq->desc_packed[head_idx].flags, head_flags,
240 vhost_log_cache_used_vring(dev, vq,
242 sizeof(struct vring_packed_desc),
243 sizeof(struct vring_packed_desc));
245 vq->shadow_used_idx = 0;
246 vhost_log_cache_sync(dev, vq);
249 static __rte_always_inline void
250 update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
251 uint16_t desc_idx, uint32_t len, uint16_t count)
253 uint16_t i = vq->shadow_used_idx++;
255 vq->shadow_used_packed[i].id = desc_idx;
256 vq->shadow_used_packed[i].len = len;
257 vq->shadow_used_packed[i].count = count;
261 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
263 struct batch_copy_elem *elem = vq->batch_copy_elems;
264 uint16_t count = vq->batch_copy_nb_elems;
267 for (i = 0; i < count; i++) {
268 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
269 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
271 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
274 vq->batch_copy_nb_elems = 0;
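/*
 * Descriptive note (added): copies of at most MAX_BATCH_LEN bytes are not
 * performed inline; copy_mbuf_to_desc() records them as batch_copy_elem
 * entries (dst, src, len, log_addr) and this function drains that queue in
 * one pass, doing the memcpy, dirty-page logging and packet dump together.
 */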
278 do_data_copy_dequeue(struct vhost_virtqueue *vq)
280 struct batch_copy_elem *elem = vq->batch_copy_elems;
281 uint16_t count = vq->batch_copy_nb_elems;
284 for (i = 0; i < count; i++)
285 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
287 vq->batch_copy_nb_elems = 0;
290 static __rte_always_inline void
291 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
292 struct vhost_virtqueue *vq,
296 uint16_t num_buffers)
299 for (i = 0; i < num_buffers; i++) {
300 /* keep the enqueue shadow-ring flush aligned with the batch size */
301 if (!vq->shadow_used_idx)
302 vq->shadow_aligned_idx = vq->last_used_idx &
304 vq->shadow_used_packed[vq->shadow_used_idx].id = id[i];
305 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
306 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
307 vq->shadow_aligned_idx += count[i];
308 vq->shadow_used_idx++;
311 if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
312 do_data_copy_enqueue(dev, vq);
313 vhost_flush_enqueue_shadow_packed(dev, vq);
317 /* skip the write when the value is already equal, to lessen cache issues */
318 #define ASSIGN_UNLESS_EQUAL(var, val) do { \
319 if ((var) != (val)) \
320 (var) = (val); \
321 } while (0)
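/*
 * Example (illustrative): the virtio-net header area often already holds the
 * values being written, so the conditional store avoids dirtying a cache
 * line shared with the guest:
 *
 *   ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);   stores only if != 0
 *   net_hdr->csum_start = 0;                       always stores
 */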
323 static __rte_always_inline void
324 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
326 uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
328 if (m_buf->ol_flags & PKT_TX_TCP_SEG)
329 csum_l4 |= PKT_TX_TCP_CKSUM;
332 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
333 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
336 case PKT_TX_TCP_CKSUM:
337 net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
340 case PKT_TX_UDP_CKSUM:
341 net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
344 case PKT_TX_SCTP_CKSUM:
345 net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
350 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
351 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
352 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
355 /* IP cksum verification cannot be bypassed, so calculate it here */
356 if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
357 struct rte_ipv4_hdr *ipv4_hdr;
359 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
361 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
364 if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
365 if (m_buf->ol_flags & PKT_TX_IPV4)
366 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
368 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
369 net_hdr->gso_size = m_buf->tso_segsz;
370 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
372 } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
373 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
374 net_hdr->gso_size = m_buf->tso_segsz;
375 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
378 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
379 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
380 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
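/*
 * Worked example (values assumed): for a TCP/IPv4 mbuf with l2_len = 14 and
 * l3_len = 20 requesting PKT_TX_TCP_CKSUM, the header produced above is:
 *
 *   net_hdr->flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 *   net_hdr->csum_start  = 34;                                    l2 + l3
 *   net_hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);   i.e. 16
 *
 * so the receiver must checksum the data starting at byte 34 and store the
 * result 16 bytes into the TCP header.
 */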
384 static __rte_always_inline int
385 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
386 struct buf_vector *buf_vec, uint16_t *vec_idx,
387 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
389 uint16_t vec_id = *vec_idx;
393 uint64_t desc_chunck_len = desc_len;
395 if (unlikely(vec_id >= BUF_VECTOR_MAX))
398 desc_addr = vhost_iova_to_vva(dev, vq,
402 if (unlikely(!desc_addr))
405 rte_prefetch0((void *)(uintptr_t)desc_addr);
407 buf_vec[vec_id].buf_iova = desc_iova;
408 buf_vec[vec_id].buf_addr = desc_addr;
409 buf_vec[vec_id].buf_len = desc_chunck_len;
411 desc_len -= desc_chunck_len;
412 desc_iova += desc_chunck_len;
420 static __rte_always_inline int
421 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
422 uint32_t avail_idx, uint16_t *vec_idx,
423 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
424 uint32_t *desc_chain_len, uint8_t perm)
426 uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
427 uint16_t vec_id = *vec_idx;
430 uint32_t nr_descs = vq->size;
432 struct vring_desc *descs = vq->desc;
433 struct vring_desc *idesc = NULL;
435 if (unlikely(idx >= vq->size))
438 *desc_chain_head = idx;
440 if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
441 dlen = vq->desc[idx].len;
442 nr_descs = dlen / sizeof(struct vring_desc);
443 if (unlikely(nr_descs > vq->size))
446 descs = (struct vring_desc *)(uintptr_t)
447 vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
450 if (unlikely(!descs))
453 if (unlikely(dlen < vq->desc[idx].len)) {
455 * The indirect desc table is not contiguous
456 * in the process VA space, so we have to copy it.
458 idesc = vhost_alloc_copy_ind_table(dev, vq,
459 vq->desc[idx].addr, vq->desc[idx].len);
460 if (unlikely(!idesc))
470 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
471 free_ind_table(idesc);
475 len += descs[idx].len;
477 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
478 descs[idx].addr, descs[idx].len,
480 free_ind_table(idesc);
484 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
487 idx = descs[idx].next;
490 *desc_chain_len = len;
493 if (unlikely(!!idesc))
494 free_ind_table(idesc);
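/*
 * Background note (added, based on the virtio spec): with indirect
 * descriptors the guest places a whole chain in a separate table and points
 * a single ring entry at it. fill_vec_buf_split() therefore walks either
 * vq->desc directly or that indirect table, copying the table first
 * (vhost_alloc_copy_ind_table) when it is not contiguous in the host's
 * virtual address space.
 */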
500 * Returns -1 on failure, 0 on success
503 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
504 uint32_t size, struct buf_vector *buf_vec,
505 uint16_t *num_buffers, uint16_t avail_head,
509 uint16_t vec_idx = 0;
510 uint16_t max_tries, tries = 0;
512 uint16_t head_idx = 0;
516 cur_idx = vq->last_avail_idx;
518 if (rxvq_is_mergeable(dev))
519 max_tries = vq->size - 1;
524 if (unlikely(cur_idx == avail_head))
527 * if we have tried all available ring items, and still
528 * can't get enough buffers, it means something abnormal
531 if (unlikely(++tries > max_tries))
534 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
537 VHOST_ACCESS_RW) < 0))
539 len = RTE_MIN(len, size);
540 update_shadow_used_ring_split(vq, head_idx, len);
552 static __rte_always_inline int
553 fill_vec_buf_packed_indirect(struct virtio_net *dev,
554 struct vhost_virtqueue *vq,
555 struct vring_packed_desc *desc, uint16_t *vec_idx,
556 struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
560 uint16_t vec_id = *vec_idx;
562 struct vring_packed_desc *descs, *idescs = NULL;
565 descs = (struct vring_packed_desc *)(uintptr_t)
566 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
567 if (unlikely(!descs))
570 if (unlikely(dlen < desc->len)) {
572 * The indirect desc table is not contiguous
573 * in the process VA space, so we have to copy it.
575 idescs = vhost_alloc_copy_ind_table(dev,
576 vq, desc->addr, desc->len);
577 if (unlikely(!idescs))
583 nr_descs = desc->len / sizeof(struct vring_packed_desc);
584 if (unlikely(nr_descs >= vq->size)) {
585 free_ind_table(idescs);
589 for (i = 0; i < nr_descs; i++) {
590 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
591 free_ind_table(idescs);
595 *len += descs[i].len;
596 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
597 descs[i].addr, descs[i].len,
603 if (unlikely(!!idescs))
604 free_ind_table(idescs);
609 static __rte_always_inline int
610 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
611 uint16_t avail_idx, uint16_t *desc_count,
612 struct buf_vector *buf_vec, uint16_t *vec_idx,
613 uint16_t *buf_id, uint32_t *len, uint8_t perm)
615 bool wrap_counter = vq->avail_wrap_counter;
616 struct vring_packed_desc *descs = vq->desc_packed;
617 uint16_t vec_id = *vec_idx;
619 if (avail_idx < vq->last_avail_idx)
623 * Perform a load-acquire barrier in desc_is_avail to
624 * enforce the ordering between desc flags and desc
627 if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
634 if (unlikely(vec_id >= BUF_VECTOR_MAX))
637 if (unlikely(*desc_count >= vq->size))
641 *buf_id = descs[avail_idx].id;
643 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
644 if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
650 *len += descs[avail_idx].len;
652 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
653 descs[avail_idx].addr,
654 descs[avail_idx].len,
659 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
662 if (++avail_idx >= vq->size) {
663 avail_idx -= vq->size;
674 * Returns -1 on failure, 0 on success
677 reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
678 uint32_t size, struct buf_vector *buf_vec,
679 uint16_t *nr_vec, uint16_t *num_buffers,
683 uint16_t vec_idx = 0;
684 uint16_t max_tries, tries = 0;
691 avail_idx = vq->last_avail_idx;
693 if (rxvq_is_mergeable(dev))
694 max_tries = vq->size - 1;
700 * if we have tried all available ring items, and still
701 * can't get enough buffers, it means something abnormal
704 if (unlikely(++tries > max_tries))
707 if (unlikely(fill_vec_buf_packed(dev, vq,
708 avail_idx, &desc_count,
711 VHOST_ACCESS_RW) < 0))
714 len = RTE_MIN(len, size);
715 update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
718 avail_idx += desc_count;
719 if (avail_idx >= vq->size)
720 avail_idx -= vq->size;
722 *nr_descs += desc_count;
731 static __rte_noinline void
732 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
733 struct buf_vector *buf_vec,
734 struct virtio_net_hdr_mrg_rxbuf *hdr)
737 uint64_t remain = dev->vhost_hlen;
738 uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
739 uint64_t iova = buf_vec->buf_iova;
742 len = RTE_MIN(remain,
744 dst = buf_vec->buf_addr;
745 rte_memcpy((void *)(uintptr_t)dst,
746 (void *)(uintptr_t)src,
749 PRINT_PACKET(dev, (uintptr_t)dst,
751 vhost_log_cache_write_iova(dev, vq,
761 static __rte_always_inline int
762 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
763 struct rte_mbuf *m, struct buf_vector *buf_vec,
764 uint16_t nr_vec, uint16_t num_buffers)
766 uint32_t vec_idx = 0;
767 uint32_t mbuf_offset, mbuf_avail;
768 uint32_t buf_offset, buf_avail;
769 uint64_t buf_addr, buf_iova, buf_len;
772 struct rte_mbuf *hdr_mbuf;
773 struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
774 struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
777 if (unlikely(m == NULL)) {
782 buf_addr = buf_vec[vec_idx].buf_addr;
783 buf_iova = buf_vec[vec_idx].buf_iova;
784 buf_len = buf_vec[vec_idx].buf_len;
786 if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
793 if (unlikely(buf_len < dev->vhost_hlen))
796 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
798 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
799 dev->vid, num_buffers);
801 if (unlikely(buf_len < dev->vhost_hlen)) {
802 buf_offset = dev->vhost_hlen - buf_len;
804 buf_addr = buf_vec[vec_idx].buf_addr;
805 buf_iova = buf_vec[vec_idx].buf_iova;
806 buf_len = buf_vec[vec_idx].buf_len;
807 buf_avail = buf_len - buf_offset;
809 buf_offset = dev->vhost_hlen;
810 buf_avail = buf_len - dev->vhost_hlen;
813 mbuf_avail = rte_pktmbuf_data_len(m);
815 while (mbuf_avail != 0 || m->next != NULL) {
816 /* done with current buf, get the next one */
817 if (buf_avail == 0) {
819 if (unlikely(vec_idx >= nr_vec)) {
824 buf_addr = buf_vec[vec_idx].buf_addr;
825 buf_iova = buf_vec[vec_idx].buf_iova;
826 buf_len = buf_vec[vec_idx].buf_len;
832 /* done with current mbuf, get the next one */
833 if (mbuf_avail == 0) {
837 mbuf_avail = rte_pktmbuf_data_len(m);
841 virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
842 if (rxvq_is_mergeable(dev))
843 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
846 if (unlikely(hdr == &tmp_hdr)) {
847 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
849 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
851 vhost_log_cache_write_iova(dev, vq,
859 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
861 if (likely(cpy_len > MAX_BATCH_LEN ||
862 vq->batch_copy_nb_elems >= vq->size)) {
863 rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
864 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
866 vhost_log_cache_write_iova(dev, vq,
867 buf_iova + buf_offset,
869 PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
872 batch_copy[vq->batch_copy_nb_elems].dst =
873 (void *)((uintptr_t)(buf_addr + buf_offset));
874 batch_copy[vq->batch_copy_nb_elems].src =
875 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
876 batch_copy[vq->batch_copy_nb_elems].log_addr =
877 buf_iova + buf_offset;
878 batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
879 vq->batch_copy_nb_elems++;
882 mbuf_avail -= cpy_len;
883 mbuf_offset += cpy_len;
884 buf_avail -= cpy_len;
885 buf_offset += cpy_len;
893 static __rte_always_inline int
894 vhost_enqueue_single_packed(struct virtio_net *dev,
895 struct vhost_virtqueue *vq,
896 struct rte_mbuf *pkt,
897 struct buf_vector *buf_vec,
901 uint16_t avail_idx = vq->last_avail_idx;
902 uint16_t max_tries, tries = 0;
906 uint32_t size = pkt->pkt_len + dev->vhost_hlen;
907 uint16_t num_buffers = 0;
908 uint32_t buffer_len[vq->size];
909 uint16_t buffer_buf_id[vq->size];
910 uint16_t buffer_desc_count[vq->size];
912 if (rxvq_is_mergeable(dev))
913 max_tries = vq->size - 1;
919 * if we have tried all available ring items, and still
920 * can't get enough buffers, it means something abnormal
923 if (unlikely(++tries > max_tries))
926 if (unlikely(fill_vec_buf_packed(dev, vq,
927 avail_idx, &desc_count,
930 VHOST_ACCESS_RW) < 0))
933 len = RTE_MIN(len, size);
936 buffer_len[num_buffers] = len;
937 buffer_buf_id[num_buffers] = buf_id;
938 buffer_desc_count[num_buffers] = desc_count;
941 *nr_descs += desc_count;
942 avail_idx += desc_count;
943 if (avail_idx >= vq->size)
944 avail_idx -= vq->size;
947 if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
950 vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
951 buffer_desc_count, num_buffers);
956 static __rte_noinline uint32_t
957 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
958 struct rte_mbuf **pkts, uint32_t count)
960 uint32_t pkt_idx = 0;
961 uint16_t num_buffers;
962 struct buf_vector buf_vec[BUF_VECTOR_MAX];
965 avail_head = *((volatile uint16_t *)&vq->avail->idx);
968 * The ordering between avail index and
969 * desc reads needs to be enforced.
973 rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
975 for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
976 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
979 if (unlikely(reserve_avail_buf_split(dev, vq,
980 pkt_len, buf_vec, &num_buffers,
981 avail_head, &nr_vec) < 0)) {
982 VHOST_LOG_DEBUG(VHOST_DATA,
983 "(%d) failed to get enough desc from vring\n",
985 vq->shadow_used_idx -= num_buffers;
989 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
990 dev->vid, vq->last_avail_idx,
991 vq->last_avail_idx + num_buffers);
993 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
996 vq->shadow_used_idx -= num_buffers;
1000 vq->last_avail_idx += num_buffers;
1003 do_data_copy_enqueue(dev, vq);
1005 if (likely(vq->shadow_used_idx)) {
1006 flush_shadow_used_ring_split(dev, vq);
1007 vhost_vring_call_split(dev, vq);
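/*
 * Flow summary (added for clarity): for every mbuf the split enqueue path
 * reserves enough descriptors (reserve_avail_buf_split), copies the
 * virtio-net header and payload into them (copy_mbuf_to_desc), and records
 * the completion in the shadow used ring; the shadow ring is flushed and the
 * guest is notified once per burst rather than once per packet.
 */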
1013 static __rte_unused int
1014 virtio_dev_rx_batch_packed(struct virtio_net *dev,
1015 struct vhost_virtqueue *vq,
1016 struct rte_mbuf **pkts)
1018 bool wrap_counter = vq->avail_wrap_counter;
1019 struct vring_packed_desc *descs = vq->desc_packed;
1020 uint16_t avail_idx = vq->last_avail_idx;
1021 uint64_t desc_addrs[PACKED_BATCH_SIZE];
1022 struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1023 uint32_t buf_offset = dev->vhost_hlen;
1024 uint64_t lens[PACKED_BATCH_SIZE];
1025 uint16_t ids[PACKED_BATCH_SIZE];
1028 if (unlikely(avail_idx & PACKED_BATCH_MASK))
1031 if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1034 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1035 if (unlikely(pkts[i]->next != NULL))
1037 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1044 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1045 lens[i] = descs[avail_idx + i].len;
1047 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1048 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1052 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1053 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1054 descs[avail_idx + i].addr,
1058 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1059 if (unlikely(lens[i] != descs[avail_idx + i].len))
1063 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1064 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1065 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1066 (uintptr_t)desc_addrs[i];
1067 lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
1070 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1071 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1073 vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1075 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1076 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1077 rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1081 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1082 ids[i] = descs[avail_idx + i].id;
1084 vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1089 static __rte_unused int16_t
1090 virtio_dev_rx_single_packed(struct virtio_net *dev,
1091 struct vhost_virtqueue *vq,
1092 struct rte_mbuf *pkt)
1094 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1095 uint16_t nr_descs = 0;
1098 if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1100 VHOST_LOG_DEBUG(VHOST_DATA,
1101 "(%d) failed to get enough desc from vring\n",
1106 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1107 dev->vid, vq->last_avail_idx,
1108 vq->last_avail_idx + nr_descs);
1110 vq_inc_last_avail_packed(vq, nr_descs);
1115 static __rte_noinline uint32_t
1116 virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1117 struct rte_mbuf **pkts, uint32_t count)
1119 uint32_t pkt_idx = 0;
1120 uint16_t num_buffers;
1121 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1123 for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1124 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1125 uint16_t nr_vec = 0;
1126 uint16_t nr_descs = 0;
1128 if (unlikely(reserve_avail_buf_packed(dev, vq,
1129 pkt_len, buf_vec, &nr_vec,
1130 &num_buffers, &nr_descs) < 0)) {
1131 VHOST_LOG_DEBUG(VHOST_DATA,
1132 "(%d) failed to get enough desc from vring\n",
1134 vq->shadow_used_idx -= num_buffers;
1138 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1139 dev->vid, vq->last_avail_idx,
1140 vq->last_avail_idx + num_buffers);
1142 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1145 vq->shadow_used_idx -= num_buffers;
1149 vq_inc_last_avail_packed(vq, nr_descs);
1152 do_data_copy_enqueue(dev, vq);
1154 if (likely(vq->shadow_used_idx)) {
1155 vhost_flush_enqueue_shadow_packed(dev, vq);
1156 vhost_vring_call_packed(dev, vq);
1162 static __rte_always_inline uint32_t
1163 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1164 struct rte_mbuf **pkts, uint32_t count)
1166 struct vhost_virtqueue *vq;
1169 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1170 if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1171 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1172 dev->vid, __func__, queue_id);
1176 vq = dev->virtqueue[queue_id];
1178 rte_spinlock_lock(&vq->access_lock);
1180 if (unlikely(vq->enabled == 0))
1181 goto out_access_unlock;
1183 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1184 vhost_user_iotlb_rd_lock(vq);
1186 if (unlikely(vq->access_ok == 0))
1187 if (unlikely(vring_translate(dev, vq) < 0))
1190 count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1194 if (vq_is_packed(dev))
1195 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1197 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1200 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1201 vhost_user_iotlb_rd_unlock(vq);
1204 rte_spinlock_unlock(&vq->access_lock);
1210 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1211 struct rte_mbuf **pkts, uint16_t count)
1213 struct virtio_net *dev = get_device(vid);
1218 if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1219 RTE_LOG(ERR, VHOST_DATA,
1220 "(%d) %s: built-in vhost net backend is disabled.\n",
1221 dev->vid, __func__);
1225 return virtio_dev_rx(dev, queue_id, pkts, count);
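/*
 * Usage sketch (illustrative only, not part of the library): forwarding a
 * burst received on a physical port into the guest's RX queue 0. The names
 * `port` and `vid` are assumed to be a started ethdev port and a registered
 * vhost device.
 *
 *   struct rte_mbuf *burst[MAX_PKT_BURST];
 *   uint16_t nb = rte_eth_rx_burst(port, 0, burst, MAX_PKT_BURST);
 *   uint16_t sent = rte_vhost_enqueue_burst(vid, 0, burst, nb);
 *
 * The enqueue path copies packet data into guest memory, so the caller keeps
 * ownership of the mbufs and frees them afterwards, sent or not.
 */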
1229 virtio_net_with_host_offload(struct virtio_net *dev)
1232 ((1ULL << VIRTIO_NET_F_CSUM) |
1233 (1ULL << VIRTIO_NET_F_HOST_ECN) |
1234 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1235 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1236 (1ULL << VIRTIO_NET_F_HOST_UFO)))
1243 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1245 struct rte_ipv4_hdr *ipv4_hdr;
1246 struct rte_ipv6_hdr *ipv6_hdr;
1247 void *l3_hdr = NULL;
1248 struct rte_ether_hdr *eth_hdr;
1251 eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1253 m->l2_len = sizeof(struct rte_ether_hdr);
1254 ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1256 if (ethertype == RTE_ETHER_TYPE_VLAN) {
1257 struct rte_vlan_hdr *vlan_hdr =
1258 (struct rte_vlan_hdr *)(eth_hdr + 1);
1260 m->l2_len += sizeof(struct rte_vlan_hdr);
1261 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1264 l3_hdr = (char *)eth_hdr + m->l2_len;
1266 switch (ethertype) {
1267 case RTE_ETHER_TYPE_IPV4:
1269 *l4_proto = ipv4_hdr->next_proto_id;
1270 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
1271 *l4_hdr = (char *)l3_hdr + m->l3_len;
1272 m->ol_flags |= PKT_TX_IPV4;
1274 case RTE_ETHER_TYPE_IPV6:
1276 *l4_proto = ipv6_hdr->proto;
1277 m->l3_len = sizeof(struct rte_ipv6_hdr);
1278 *l4_hdr = (char *)l3_hdr + m->l3_len;
1279 m->ol_flags |= PKT_TX_IPV6;
1289 static __rte_always_inline void
1290 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1292 uint16_t l4_proto = 0;
1293 void *l4_hdr = NULL;
1294 struct rte_tcp_hdr *tcp_hdr = NULL;
1296 if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1299 parse_ethernet(m, &l4_proto, &l4_hdr);
1300 if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1301 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1302 switch (hdr->csum_offset) {
1303 case (offsetof(struct rte_tcp_hdr, cksum)):
1304 if (l4_proto == IPPROTO_TCP)
1305 m->ol_flags |= PKT_TX_TCP_CKSUM;
1307 case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1308 if (l4_proto == IPPROTO_UDP)
1309 m->ol_flags |= PKT_TX_UDP_CKSUM;
1311 case (offsetof(struct rte_sctp_hdr, cksum)):
1312 if (l4_proto == IPPROTO_SCTP)
1313 m->ol_flags |= PKT_TX_SCTP_CKSUM;
1321 if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1322 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1323 case VIRTIO_NET_HDR_GSO_TCPV4:
1324 case VIRTIO_NET_HDR_GSO_TCPV6:
1326 m->ol_flags |= PKT_TX_TCP_SEG;
1327 m->tso_segsz = hdr->gso_size;
1328 m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1330 case VIRTIO_NET_HDR_GSO_UDP:
1331 m->ol_flags |= PKT_TX_UDP_SEG;
1332 m->tso_segsz = hdr->gso_size;
1333 m->l4_len = sizeof(struct rte_udp_hdr);
1336 RTE_LOG(WARNING, VHOST_DATA,
1337 "unsupported gso type %u.\n", hdr->gso_type);
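/*
 * Worked example (values assumed): a header with gso_type
 * VIRTIO_NET_HDR_GSO_TCPV4 and gso_size 1448 is translated above into an
 * mbuf with PKT_TX_TCP_SEG set, tso_segsz = 1448 and l4_len derived from the
 * TCP header's data offset field, mirroring virtio_enqueue_offload() in the
 * opposite direction.
 */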
1343 static __rte_noinline void
1344 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1345 struct buf_vector *buf_vec)
1348 uint64_t remain = sizeof(struct virtio_net_hdr);
1350 uint64_t dst = (uint64_t)(uintptr_t)hdr;
1353 len = RTE_MIN(remain, buf_vec->buf_len);
1354 src = buf_vec->buf_addr;
1355 rte_memcpy((void *)(uintptr_t)dst,
1356 (void *)(uintptr_t)src, len);
1364 static __rte_always_inline int
1365 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1366 struct buf_vector *buf_vec, uint16_t nr_vec,
1367 struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1369 uint32_t buf_avail, buf_offset;
1370 uint64_t buf_addr, buf_iova, buf_len;
1371 uint32_t mbuf_avail, mbuf_offset;
1373 struct rte_mbuf *cur = m, *prev = m;
1374 struct virtio_net_hdr tmp_hdr;
1375 struct virtio_net_hdr *hdr = NULL;
1376 /* A counter to avoid a dead loop on the desc chain */
1377 uint16_t vec_idx = 0;
1378 struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1381 buf_addr = buf_vec[vec_idx].buf_addr;
1382 buf_iova = buf_vec[vec_idx].buf_iova;
1383 buf_len = buf_vec[vec_idx].buf_len;
1385 if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1390 if (virtio_net_with_host_offload(dev)) {
1391 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1393 * No luck, the virtio-net header doesn't fit
1394 * in a contiguous virtual area.
1396 copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1399 hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1404 * A virtio driver normally uses at least 2 desc buffers
1405 * for Tx: the first for storing the header, and the others
1406 * for storing the data.
1408 if (unlikely(buf_len < dev->vhost_hlen)) {
1409 buf_offset = dev->vhost_hlen - buf_len;
1411 buf_addr = buf_vec[vec_idx].buf_addr;
1412 buf_iova = buf_vec[vec_idx].buf_iova;
1413 buf_len = buf_vec[vec_idx].buf_len;
1414 buf_avail = buf_len - buf_offset;
1415 } else if (buf_len == dev->vhost_hlen) {
1416 if (unlikely(++vec_idx >= nr_vec))
1418 buf_addr = buf_vec[vec_idx].buf_addr;
1419 buf_iova = buf_vec[vec_idx].buf_iova;
1420 buf_len = buf_vec[vec_idx].buf_len;
1423 buf_avail = buf_len;
1425 buf_offset = dev->vhost_hlen;
1426 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1430 (uintptr_t)(buf_addr + buf_offset),
1431 (uint32_t)buf_avail, 0);
1434 mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
1438 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1441 * A desc buf might span two host physical pages that are
1442 * not contiguous. In such a case (gpa_to_hpa returns 0), the data
1443 * will be copied even though zero copy is enabled.
1445 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1446 buf_iova + buf_offset, cpy_len)))) {
1447 cur->data_len = cpy_len;
1450 (void *)(uintptr_t)(buf_addr + buf_offset);
1451 cur->buf_iova = hpa;
1454 * In zero copy mode, one mbuf can only reference data
1455 * from one desc buf, either in whole or in part.
1457 mbuf_avail = cpy_len;
1459 if (likely(cpy_len > MAX_BATCH_LEN ||
1460 vq->batch_copy_nb_elems >= vq->size ||
1461 (hdr && cur == m))) {
1462 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1464 (void *)((uintptr_t)(buf_addr +
1468 batch_copy[vq->batch_copy_nb_elems].dst =
1469 rte_pktmbuf_mtod_offset(cur, void *,
1471 batch_copy[vq->batch_copy_nb_elems].src =
1472 (void *)((uintptr_t)(buf_addr +
1474 batch_copy[vq->batch_copy_nb_elems].len =
1476 vq->batch_copy_nb_elems++;
1480 mbuf_avail -= cpy_len;
1481 mbuf_offset += cpy_len;
1482 buf_avail -= cpy_len;
1483 buf_offset += cpy_len;
1485 /* This buf has reached its end, get the next one */
1486 if (buf_avail == 0) {
1487 if (++vec_idx >= nr_vec)
1490 buf_addr = buf_vec[vec_idx].buf_addr;
1491 buf_iova = buf_vec[vec_idx].buf_iova;
1492 buf_len = buf_vec[vec_idx].buf_len;
1495 buf_avail = buf_len;
1497 PRINT_PACKET(dev, (uintptr_t)buf_addr,
1498 (uint32_t)buf_avail, 0);
1502 * This mbuf has reached its end, get a new one
1503 * to hold more data.
1505 if (mbuf_avail == 0) {
1506 cur = rte_pktmbuf_alloc(mbuf_pool);
1507 if (unlikely(cur == NULL)) {
1508 RTE_LOG(ERR, VHOST_DATA, "Failed to "
1509 "allocate memory for mbuf.\n");
1513 if (unlikely(dev->dequeue_zero_copy))
1514 rte_mbuf_refcnt_update(cur, 1);
1517 prev->data_len = mbuf_offset;
1519 m->pkt_len += mbuf_offset;
1523 mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1527 prev->data_len = mbuf_offset;
1528 m->pkt_len += mbuf_offset;
1531 vhost_dequeue_offload(hdr, m);
1538 static __rte_always_inline struct zcopy_mbuf *
1539 get_zmbuf(struct vhost_virtqueue *vq)
1545 /* search [last_zmbuf_idx, zmbuf_size) */
1546 i = vq->last_zmbuf_idx;
1547 last = vq->zmbuf_size;
1550 for (; i < last; i++) {
1551 if (vq->zmbufs[i].in_use == 0) {
1552 vq->last_zmbuf_idx = i + 1;
1553 vq->zmbufs[i].in_use = 1;
1554 return &vq->zmbufs[i];
1560 /* search [0, last_zmbuf_idx) */
1562 last = vq->last_zmbuf_idx;
1570 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
1576 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
1578 struct rte_mbuf_ext_shared_info *shinfo = NULL;
1579 uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
1584 /* Try to use the pkt buffer to store shinfo, to reduce the amount of memory
1585 * required; otherwise store shinfo in the newly allocated buffer.
1587 if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
1588 shinfo = rte_pktmbuf_mtod(pkt,
1589 struct rte_mbuf_ext_shared_info *);
1591 total_len += sizeof(*shinfo) + sizeof(uintptr_t);
1592 total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
1595 if (unlikely(total_len > UINT16_MAX))
1598 buf_len = total_len;
1599 buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
1600 if (unlikely(buf == NULL))
1603 /* Initialize shinfo */
1605 shinfo->free_cb = virtio_dev_extbuf_free;
1606 shinfo->fcb_opaque = buf;
1607 rte_mbuf_ext_refcnt_set(shinfo, 1);
1609 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
1610 virtio_dev_extbuf_free, buf);
1611 if (unlikely(shinfo == NULL)) {
1613 RTE_LOG(ERR, VHOST_DATA, "Failed to init shinfo\n");
1618 iova = rte_malloc_virt2iova(buf);
1619 rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
1620 rte_pktmbuf_reset_headroom(pkt);
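/*
 * Descriptive note (added): when the requested size exceeds what the
 * mempool's mbufs can hold, the packet is backed by an rte_malloc'd external
 * buffer attached with rte_pktmbuf_attach_extbuf(); virtio_dev_extbuf_free()
 * releases that buffer once the last mbuf reference is dropped.
 */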
1626 * Allocate a pktmbuf that can hold data_len bytes, honoring the device's
1627 * extbuf/linearbuf settings.
1628 static __rte_always_inline struct rte_mbuf *
1629 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
1632 struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
1634 if (unlikely(pkt == NULL))
1637 if (rte_pktmbuf_tailroom(pkt) >= data_len)
1640 /* attach an external buffer if supported */
1641 if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
1644 /* check if chained buffers are allowed */
1645 if (!dev->linearbuf)
1648 /* Data doesn't fit into the buffer and the host supports
1649 * only linear buffers
1651 rte_pktmbuf_free(pkt);
1656 static __rte_noinline uint16_t
1657 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1658 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1661 uint16_t free_entries;
1663 if (unlikely(dev->dequeue_zero_copy)) {
1664 struct zcopy_mbuf *zmbuf, *next;
1666 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1667 zmbuf != NULL; zmbuf = next) {
1668 next = TAILQ_NEXT(zmbuf, next);
1670 if (mbuf_is_consumed(zmbuf->mbuf)) {
1671 update_shadow_used_ring_split(vq,
1672 zmbuf->desc_idx, 0);
1673 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1674 restore_mbuf(zmbuf->mbuf);
1675 rte_pktmbuf_free(zmbuf->mbuf);
1681 if (likely(vq->shadow_used_idx)) {
1682 flush_shadow_used_ring_split(dev, vq);
1683 vhost_vring_call_split(dev, vq);
1687 free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1689 if (free_entries == 0)
1693 * The ordering between avail index and
1694 * desc reads needs to be enforced.
1698 rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1700 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1702 count = RTE_MIN(count, MAX_PKT_BURST);
1703 count = RTE_MIN(count, free_entries);
1704 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1707 for (i = 0; i < count; i++) {
1708 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1711 uint16_t nr_vec = 0;
1714 if (unlikely(fill_vec_buf_split(dev, vq,
1715 vq->last_avail_idx + i,
1717 &head_idx, &buf_len,
1718 VHOST_ACCESS_RO) < 0))
1721 if (likely(dev->dequeue_zero_copy == 0))
1722 update_shadow_used_ring_split(vq, head_idx, 0);
1724 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1725 if (unlikely(pkts[i] == NULL))
1728 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1730 if (unlikely(err)) {
1731 rte_pktmbuf_free(pkts[i]);
1735 if (unlikely(dev->dequeue_zero_copy)) {
1736 struct zcopy_mbuf *zmbuf;
1738 zmbuf = get_zmbuf(vq);
1740 rte_pktmbuf_free(pkts[i]);
1743 zmbuf->mbuf = pkts[i];
1744 zmbuf->desc_idx = head_idx;
1747 * Pin the mbuf with an extra reference; we will check later
1748 * whether the mbuf has been freed (meaning we were the last
1749 * user). If that is the case, we can then update the
1750 * used ring safely.
1752 rte_mbuf_refcnt_update(pkts[i], 1);
1755 TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1758 vq->last_avail_idx += i;
1760 if (likely(dev->dequeue_zero_copy == 0)) {
1761 do_data_copy_dequeue(vq);
1762 if (unlikely(i < count))
1763 vq->shadow_used_idx = i;
1764 if (likely(vq->shadow_used_idx)) {
1765 flush_shadow_used_ring_split(dev, vq);
1766 vhost_vring_call_split(dev, vq);
1773 static __rte_always_inline int
1774 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
1775 struct vhost_virtqueue *vq,
1776 struct rte_mempool *mbuf_pool,
1777 struct rte_mbuf **pkts,
1779 uintptr_t *desc_addrs,
1782 bool wrap = vq->avail_wrap_counter;
1783 struct vring_packed_desc *descs = vq->desc_packed;
1784 struct virtio_net_hdr *hdr;
1785 uint64_t lens[PACKED_BATCH_SIZE];
1786 uint64_t buf_lens[PACKED_BATCH_SIZE];
1787 uint32_t buf_offset = dev->vhost_hlen;
1790 if (unlikely(avail_idx & PACKED_BATCH_MASK))
1792 if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1795 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1796 flags = descs[avail_idx + i].flags;
1797 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
1798 (wrap == !!(flags & VRING_DESC_F_USED)) ||
1799 (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
1805 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1806 lens[i] = descs[avail_idx + i].len;
1808 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1809 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1810 descs[avail_idx + i].addr,
1811 &lens[i], VHOST_ACCESS_RW);
1814 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1815 if (unlikely((lens[i] != descs[avail_idx + i].len)))
1819 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1820 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
1825 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1826 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
1828 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1829 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
1833 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1834 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
1835 pkts[i]->data_len = pkts[i]->pkt_len;
1836 ids[i] = descs[avail_idx + i].id;
1839 if (virtio_net_with_host_offload(dev)) {
1840 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1841 hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
1842 vhost_dequeue_offload(hdr, pkts[i]);
1849 for (i = 0; i < PACKED_BATCH_SIZE; i++)
1850 rte_pktmbuf_free(pkts[i]);
1855 static __rte_unused int
1856 virtio_dev_tx_batch_packed(struct virtio_net *dev,
1857 struct vhost_virtqueue *vq,
1858 struct rte_mempool *mbuf_pool,
1859 struct rte_mbuf **pkts)
1861 uint16_t avail_idx = vq->last_avail_idx;
1862 uint32_t buf_offset = dev->vhost_hlen;
1863 uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1864 uint16_t ids[PACKED_BATCH_SIZE];
1867 if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1868 avail_idx, desc_addrs, ids))
1871 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1872 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1874 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1875 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1876 (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1879 vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1884 static __rte_always_inline int
1885 vhost_dequeue_single_packed(struct virtio_net *dev,
1886 struct vhost_virtqueue *vq,
1887 struct rte_mempool *mbuf_pool,
1888 struct rte_mbuf **pkts,
1890 uint16_t *desc_count)
1892 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1894 uint16_t nr_vec = 0;
1897 if (unlikely(fill_vec_buf_packed(dev, vq,
1898 vq->last_avail_idx, desc_count,
1901 VHOST_ACCESS_RO) < 0))
1904 *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1905 if (unlikely(*pkts == NULL)) {
1906 RTE_LOG(ERR, VHOST_DATA,
1907 "Failed to allocate memory for mbuf.\n");
1911 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
1913 if (unlikely(err)) {
1914 rte_pktmbuf_free(*pkts);
1921 static __rte_unused int
1922 virtio_dev_tx_single_packed(struct virtio_net *dev,
1923 struct vhost_virtqueue *vq,
1924 struct rte_mempool *mbuf_pool,
1925 struct rte_mbuf **pkts)
1928 uint16_t buf_id, desc_count;
1930 if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
1934 vq_inc_last_avail_packed(vq, desc_count);
1939 static __rte_noinline uint16_t
1940 virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1941 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1945 if (unlikely(dev->dequeue_zero_copy)) {
1946 struct zcopy_mbuf *zmbuf, *next;
1948 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1949 zmbuf != NULL; zmbuf = next) {
1950 next = TAILQ_NEXT(zmbuf, next);
1952 if (mbuf_is_consumed(zmbuf->mbuf)) {
1953 update_shadow_used_ring_packed(vq,
1958 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1959 restore_mbuf(zmbuf->mbuf);
1960 rte_pktmbuf_free(zmbuf->mbuf);
1966 if (likely(vq->shadow_used_idx)) {
1967 flush_shadow_used_ring_packed(dev, vq);
1968 vhost_vring_call_packed(dev, vq);
1972 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1974 count = RTE_MIN(count, MAX_PKT_BURST);
1975 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1978 for (i = 0; i < count; i++) {
1979 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1982 uint16_t desc_count, nr_vec = 0;
1985 if (unlikely(fill_vec_buf_packed(dev, vq,
1986 vq->last_avail_idx, &desc_count,
1989 VHOST_ACCESS_RO) < 0))
1992 if (likely(dev->dequeue_zero_copy == 0))
1993 update_shadow_used_ring_packed(vq, buf_id, 0,
1996 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1997 if (unlikely(pkts[i] == NULL))
2000 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2002 if (unlikely(err)) {
2003 rte_pktmbuf_free(pkts[i]);
2007 if (unlikely(dev->dequeue_zero_copy)) {
2008 struct zcopy_mbuf *zmbuf;
2010 zmbuf = get_zmbuf(vq);
2012 rte_pktmbuf_free(pkts[i]);
2015 zmbuf->mbuf = pkts[i];
2016 zmbuf->desc_idx = buf_id;
2017 zmbuf->desc_count = desc_count;
2020 * Pin the mbuf with an extra reference; we will check later
2021 * whether the mbuf has been freed (meaning we were the last
2022 * user). If that is the case, we can then update the
2023 * used ring safely.
2025 rte_mbuf_refcnt_update(pkts[i], 1);
2028 TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
2031 vq_inc_last_avail_packed(vq, desc_count);
2034 if (likely(dev->dequeue_zero_copy == 0)) {
2035 do_data_copy_dequeue(vq);
2036 if (unlikely(i < count))
2037 vq->shadow_used_idx = i;
2038 if (likely(vq->shadow_used_idx)) {
2039 flush_shadow_used_ring_packed(dev, vq);
2040 vhost_vring_call_packed(dev, vq);
2048 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2049 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2051 struct virtio_net *dev;
2052 struct rte_mbuf *rarp_mbuf = NULL;
2053 struct vhost_virtqueue *vq;
2055 dev = get_device(vid);
2059 if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2060 RTE_LOG(ERR, VHOST_DATA,
2061 "(%d) %s: built-in vhost net backend is disabled.\n",
2062 dev->vid, __func__);
2066 if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2067 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
2068 dev->vid, __func__, queue_id);
2072 vq = dev->virtqueue[queue_id];
2074 if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2077 if (unlikely(vq->enabled == 0)) {
2079 goto out_access_unlock;
2082 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2083 vhost_user_iotlb_rd_lock(vq);
2085 if (unlikely(vq->access_ok == 0))
2086 if (unlikely(vring_translate(dev, vq) < 0)) {
2092 * Construct a RARP broadcast packet, and inject it into the "pkts"
2093 * array, so it looks like the guest actually sent such a packet.
2095 * Check user_send_rarp() for more information.
2097 * broadcast_rarp shares a cacheline in the virtio_net structure
2098 * with some fields that are accessed during enqueue and
2099 * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
2100 * result in false sharing between enqueue and dequeue.
2102 * Prevent unnecessary false sharing by reading broadcast_rarp first
2103 * and only performing cmpset if the read indicates it is likely to
2106 if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
2107 rte_atomic16_cmpset((volatile uint16_t *)
2108 &dev->broadcast_rarp.cnt, 1, 0))) {
2110 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2111 if (rarp_mbuf == NULL) {
2112 RTE_LOG(ERR, VHOST_DATA,
2113 "Failed to make RARP packet.\n");
2120 if (vq_is_packed(dev))
2121 count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
2123 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2126 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2127 vhost_user_iotlb_rd_unlock(vq);
2130 rte_spinlock_unlock(&vq->access_lock);
2132 if (unlikely(rarp_mbuf != NULL)) {
2134 * Inject it at the head of the "pkts" array, so that the switch's MAC
2135 * learning table gets updated first.
2137 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2138 pkts[0] = rarp_mbuf;
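/*
 * Note for callers (added for clarity): when injected, the RARP mbuf
 * occupies pkts[0], the dequeued guest packets follow it, and the returned
 * count accounts for it, so the caller transmits or frees it like any other
 * packet.
 */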