1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
7 #include <linux/virtio_net.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
13 #include <rte_vhost.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
24 #define MAX_PKT_BURST 32
26 #define MAX_BATCH_LEN 256
29 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
31 return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
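/*
 * Note on the parity check above: virtio-net virtqueues come in RX/TX
 * pairs, with even indexes (0, 2, ...) being the guest's receive queues
 * (the host enqueues into them) and odd indexes (1, 3, ...) the guest's
 * transmit queues (the host dequeues from them). "(is_tx ^ (idx & 1)) == 0"
 * therefore accepts even idx when is_tx is 0 and odd idx when is_tx is 1;
 * e.g. is_valid_virt_queue_idx(3, 1, 4) is true, while
 * is_valid_virt_queue_idx(3, 0, 4) is not.
 */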
34 static __rte_always_inline struct vring_desc *
35 alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
36 struct vring_desc *desc)
38 struct vring_desc *idesc;
40 uint64_t len, remain = desc->len;
41 uint64_t desc_addr = desc->addr;
43 idesc = rte_malloc(__func__, desc->len, 0);
47 dst = (uint64_t)(uintptr_t)idesc;
51 src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
53 if (unlikely(!src || !len)) {
58 rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
68 static __rte_always_inline void
69 free_ind_table(struct vring_desc *idesc)
74 static __rte_always_inline void
75 do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
76 uint16_t to, uint16_t from, uint16_t size)
78 rte_memcpy(&vq->used->ring[to],
79 &vq->shadow_used_ring[from],
80 size * sizeof(struct vring_used_elem));
81 vhost_log_used_vring(dev, vq,
82 offsetof(struct vring_used, ring[to]),
83 size * sizeof(struct vring_used_elem));
86 static __rte_always_inline void
87 flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
89 uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
91 if (used_idx + vq->shadow_used_idx <= vq->size) {
92 do_flush_shadow_used_ring(dev, vq, used_idx, 0,
97 /* update used ring interval [used_idx, vq->size) */
98 size = vq->size - used_idx;
99 do_flush_shadow_used_ring(dev, vq, used_idx, 0, size);
101 /* update the wrapped-around interval [0, shadow_used_idx - size) */
102 do_flush_shadow_used_ring(dev, vq, 0, size,
103 vq->shadow_used_idx - size);
105 vq->last_used_idx += vq->shadow_used_idx;
109 *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
110 vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
111 sizeof(vq->used->idx));
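/*
 * The shadow used ring batches used-ring updates: entries are first staged
 * in vq->shadow_used_ring and then written back to the guest-visible used
 * ring in at most two contiguous memcpy()s, reducing the number of writes
 * to the shared ring. The split handles index wrap-around: for example,
 * with vq->size = 256, used_idx = 250 and shadow_used_idx = 10, the first
 * flush covers ring[250..255] (6 entries) and the second ring[0..3] (the
 * remaining 4 entries). Only after the staged entries have been written
 * back is used->idx advanced by shadow_used_idx.
 */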
114 static __rte_always_inline void
115 update_shadow_used_ring(struct vhost_virtqueue *vq,
116 uint16_t desc_idx, uint16_t len)
118 uint16_t i = vq->shadow_used_idx++;
120 vq->shadow_used_ring[i].id = desc_idx;
121 vq->shadow_used_ring[i].len = len;
125 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
127 struct batch_copy_elem *elem = vq->batch_copy_elems;
128 uint16_t count = vq->batch_copy_nb_elems;
131 for (i = 0; i < count; i++) {
132 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
133 vhost_log_write(dev, elem[i].log_addr, elem[i].len);
134 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
139 do_data_copy_dequeue(struct vhost_virtqueue *vq)
141 struct batch_copy_elem *elem = vq->batch_copy_elems;
142 uint16_t count = vq->batch_copy_nb_elems;
145 for (i = 0; i < count; i++)
146 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
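/*
 * do_data_copy_enqueue()/do_data_copy_dequeue() drain the per-virtqueue
 * batch_copy_elems array. Small copies are not performed immediately in
 * copy_mbuf_to_desc()/copy_desc_to_mbuf(); they are staged as
 * (dst, src, len) tuples and executed here in one tight loop, which keeps
 * the hot per-descriptor path shorter. The enqueue variant additionally
 * logs the written guest pages (used for live-migration dirty tracking)
 * and dumps the packet when debug printing is enabled; the dequeue variant
 * only performs the copies.
 */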
149 /* skip the write when the value is already equal, to lessen cache issues */
150 #define ASSIGN_UNLESS_EQUAL(var, val) do { \
151 if ((var) != (val)) \
156 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
158 uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
160 if (m_buf->ol_flags & PKT_TX_TCP_SEG)
161 csum_l4 |= PKT_TX_TCP_CKSUM;
164 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
165 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
168 case PKT_TX_TCP_CKSUM:
169 net_hdr->csum_offset = (offsetof(struct tcp_hdr,
172 case PKT_TX_UDP_CKSUM:
173 net_hdr->csum_offset = (offsetof(struct udp_hdr,
176 case PKT_TX_SCTP_CKSUM:
177 net_hdr->csum_offset = (offsetof(struct sctp_hdr,
182 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
183 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
184 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
187 /* IP cksum verification cannot be bypassed, so calculate it here */
188 if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
189 struct ipv4_hdr *ipv4_hdr;
191 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
193 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
196 if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
197 if (m_buf->ol_flags & PKT_TX_IPV4)
198 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
200 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
201 net_hdr->gso_size = m_buf->tso_segsz;
202 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
204 } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
205 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
206 net_hdr->gso_size = m_buf->tso_segsz;
207 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
210 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
211 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
212 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
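/*
 * Example of the translation done by virtio_enqueue_offload(): for a TSO
 * IPv4/TCP mbuf (PKT_TX_TCP_SEG set), the header ends up with
 * flags = VIRTIO_NET_HDR_F_NEEDS_CSUM, csum_start = l2_len + l3_len,
 * csum_offset = offsetof(struct tcp_hdr, cksum),
 * gso_type = VIRTIO_NET_HDR_GSO_TCPV4, gso_size = tso_segsz and hdr_len
 * covering the full Ethernet + IP + TCP header, which is what the guest
 * driver expects per the virtio-net specification.
 */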
216 static __rte_always_inline int
217 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
218 struct vring_desc *descs, struct rte_mbuf *m,
219 uint16_t desc_idx, uint32_t size)
221 uint32_t desc_avail, desc_offset;
222 uint32_t mbuf_avail, mbuf_offset;
225 struct vring_desc *desc;
227 /* A counter to avoid an endless loop over the desc chain */
228 uint16_t nr_desc = 1;
229 struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
230 uint16_t copy_nb = vq->batch_copy_nb_elems;
233 desc = &descs[desc_idx];
235 desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
236 &dlen, VHOST_ACCESS_RW);
238 * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
239 * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
240 * otherwise stores offset on the stack instead of in a register.
242 if (unlikely(dlen != desc->len || desc->len < dev->vhost_hlen) ||
248 rte_prefetch0((void *)(uintptr_t)desc_addr);
250 virtio_enqueue_offload(m, (struct virtio_net_hdr *)(uintptr_t)desc_addr);
251 vhost_log_write(dev, desc->addr, dev->vhost_hlen);
252 PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
254 desc_offset = dev->vhost_hlen;
255 desc_avail = desc->len - dev->vhost_hlen;
257 mbuf_avail = rte_pktmbuf_data_len(m);
259 while (mbuf_avail != 0 || m->next != NULL) {
260 /* done with current mbuf, fetch next */
261 if (mbuf_avail == 0) {
265 mbuf_avail = rte_pktmbuf_data_len(m);
268 /* done with current desc buf, fetch next */
269 if (desc_avail == 0) {
270 if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
271 /* Not enough room left in the vring buffer */
275 if (unlikely(desc->next >= size || ++nr_desc > size)) {
280 desc = &descs[desc->next];
282 desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
285 if (unlikely(!desc_addr || dlen != desc->len)) {
291 desc_avail = desc->len;
294 cpy_len = RTE_MIN(desc_avail, mbuf_avail);
295 if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) {
296 rte_memcpy((void *)((uintptr_t)(desc_addr +
298 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
300 vhost_log_write(dev, desc->addr + desc_offset, cpy_len);
301 PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
304 batch_copy[copy_nb].dst =
305 (void *)((uintptr_t)(desc_addr + desc_offset));
306 batch_copy[copy_nb].src =
307 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
308 batch_copy[copy_nb].log_addr = desc->addr + desc_offset;
309 batch_copy[copy_nb].len = cpy_len;
313 mbuf_avail -= cpy_len;
314 mbuf_offset += cpy_len;
315 desc_avail -= cpy_len;
316 desc_offset += cpy_len;
320 vq->batch_copy_nb_elems = copy_nb;
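/*
 * copy_mbuf_to_desc() walks two chains in lockstep: the mbuf segment chain
 * on the source side and the guest descriptor chain on the destination
 * side. Whenever either side runs out (mbuf_avail == 0 or desc_avail == 0)
 * the next segment/descriptor is fetched, so one mbuf segment may be split
 * across several descriptors and vice versa. nr_desc bounds the number of
 * descriptors followed, protecting against malicious or corrupted rings
 * whose "next" links form a cycle.
 */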
326 * This function adds buffers to the virtio device's RX virtqueue. Buffers can
327 * come from the physical port or from another virtio device. The count of
328 * packets successfully added to the RX queue is returned. This function
329 * handles scattered (multi-segment) mbufs, but it does not support the
330 * mergeable-buffers feature.
332 static __rte_always_inline uint32_t
333 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
334 struct rte_mbuf **pkts, uint32_t count)
336 struct vhost_virtqueue *vq;
337 uint16_t avail_idx, free_entries, start_idx;
338 uint16_t desc_indexes[MAX_PKT_BURST];
339 struct vring_desc *descs;
343 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
344 if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
345 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
346 dev->vid, __func__, queue_id);
350 vq = dev->virtqueue[queue_id];
352 rte_spinlock_lock(&vq->access_lock);
354 if (unlikely(vq->enabled == 0))
355 goto out_access_unlock;
357 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
358 vhost_user_iotlb_rd_lock(vq);
360 if (unlikely(vq->access_ok == 0)) {
361 if (unlikely(vring_translate(dev, vq) < 0)) {
367 avail_idx = *((volatile uint16_t *)&vq->avail->idx);
368 start_idx = vq->last_used_idx;
369 free_entries = avail_idx - start_idx;
370 count = RTE_MIN(count, free_entries);
371 count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
375 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
376 dev->vid, start_idx, start_idx + count);
378 vq->batch_copy_nb_elems = 0;
380 /* Retrieve all of the desc indexes first to avoid caching issues. */
381 rte_prefetch0(&vq->avail->ring[start_idx & (vq->size - 1)]);
382 for (i = 0; i < count; i++) {
383 used_idx = (start_idx + i) & (vq->size - 1);
384 desc_indexes[i] = vq->avail->ring[used_idx];
385 vq->used->ring[used_idx].id = desc_indexes[i];
386 vq->used->ring[used_idx].len = pkts[i]->pkt_len +
388 vhost_log_used_vring(dev, vq,
389 offsetof(struct vring_used, ring[used_idx]),
390 sizeof(vq->used->ring[used_idx]));
393 rte_prefetch0(&vq->desc[desc_indexes[0]]);
394 for (i = 0; i < count; i++) {
395 struct vring_desc *idesc = NULL;
396 uint16_t desc_idx = desc_indexes[i];
399 if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
400 uint64_t dlen = vq->desc[desc_idx].len;
401 descs = (struct vring_desc *)(uintptr_t)
402 vhost_iova_to_vva(dev,
403 vq, vq->desc[desc_idx].addr,
404 &dlen, VHOST_ACCESS_RO);
405 if (unlikely(!descs)) {
410 if (unlikely(dlen < vq->desc[desc_idx].len)) {
412 * The indirect desc table is not contiguous
413 * in process VA space, so we have to copy it.
415 idesc = alloc_copy_ind_table(dev, vq,
416 &vq->desc[desc_idx]);
417 if (unlikely(!idesc))
424 sz = vq->desc[desc_idx].len / sizeof(*descs);
430 err = copy_mbuf_to_desc(dev, vq, descs, pkts[i], desc_idx, sz);
433 free_ind_table(idesc);
438 rte_prefetch0(&vq->desc[desc_indexes[i+1]]);
440 if (unlikely(!!idesc))
441 free_ind_table(idesc);
444 do_data_copy_enqueue(dev, vq);
448 *(volatile uint16_t *)&vq->used->idx += count;
449 vq->last_used_idx += count;
450 vhost_log_used_vring(dev, vq,
451 offsetof(struct vring_used, idx),
452 sizeof(vq->used->idx));
454 vhost_vring_call(dev, vq);
456 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
457 vhost_user_iotlb_rd_unlock(vq);
460 rte_spinlock_unlock(&vq->access_lock);
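/*
 * Summary of the non-mergeable enqueue path above: each packet consumes
 * exactly one descriptor chain, so the used-ring entries (id and total
 * length) can be filled in the first loop, before any data is copied.
 * The second loop performs the actual copies, resolving indirect tables
 * first and falling back to a bounce copy of the indirect table when it
 * is not contiguous in host VA space. Only then is used->idx advanced by
 * "count" and the guest notified via vhost_vring_call().
 */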
465 static __rte_always_inline int
466 fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
467 uint32_t avail_idx, uint32_t *vec_idx,
468 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
469 uint16_t *desc_chain_len)
471 uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
472 uint32_t vec_id = *vec_idx;
475 struct vring_desc *descs = vq->desc;
476 struct vring_desc *idesc = NULL;
478 *desc_chain_head = idx;
480 if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
481 dlen = vq->desc[idx].len;
482 descs = (struct vring_desc *)(uintptr_t)
483 vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
486 if (unlikely(!descs))
489 if (unlikely(dlen < vq->desc[idx].len)) {
491 * The indirect desc table is not contiguous
492 * in process VA space, so we have to copy it.
494 idesc = alloc_copy_ind_table(dev, vq, &vq->desc[idx]);
495 if (unlikely(!idesc))
505 if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size)) {
506 free_ind_table(idesc);
510 len += descs[idx].len;
511 buf_vec[vec_id].buf_addr = descs[idx].addr;
512 buf_vec[vec_id].buf_len = descs[idx].len;
513 buf_vec[vec_id].desc_idx = idx;
516 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
519 idx = descs[idx].next;
522 *desc_chain_len = len;
525 if (unlikely(!!idesc))
526 free_ind_table(idesc);
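/*
 * fill_vec_buf() flattens one descriptor chain, starting at the avail-ring
 * entry for "avail_idx", into the buf_vec array: each element records the
 * guest address, length and descriptor index of one buffer. Indirect
 * descriptor tables are translated (and bounce-copied when they are not
 * contiguous in host VA space) before being walked. The caller gets back
 * the head descriptor index and the total length of the chain.
 */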
532 * Returns -1 on failure, 0 on success
535 reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
536 uint32_t size, struct buf_vector *buf_vec,
537 uint16_t *num_buffers, uint16_t avail_head)
540 uint32_t vec_idx = 0;
543 uint16_t head_idx = 0;
547 cur_idx = vq->last_avail_idx;
550 if (unlikely(cur_idx == avail_head))
553 if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec,
554 &head_idx, &len) < 0))
556 len = RTE_MIN(len, size);
557 update_shadow_used_ring(vq, head_idx, len);
565 * if we have tried all available ring items and still
566 * can't get enough buffers, it means something abnormal
569 if (unlikely(tries >= vq->size))
576 static __rte_always_inline int
577 copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
578 struct rte_mbuf *m, struct buf_vector *buf_vec,
579 uint16_t num_buffers)
581 uint32_t vec_idx = 0;
583 uint32_t mbuf_offset, mbuf_avail;
584 uint32_t desc_offset, desc_avail;
587 uint64_t hdr_addr, hdr_phys_addr;
588 struct rte_mbuf *hdr_mbuf;
589 struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
590 uint16_t copy_nb = vq->batch_copy_nb_elems;
593 if (unlikely(m == NULL)) {
598 dlen = buf_vec[vec_idx].buf_len;
599 desc_addr = vhost_iova_to_vva(dev, vq, buf_vec[vec_idx].buf_addr,
600 &dlen, VHOST_ACCESS_RW);
601 if (dlen != buf_vec[vec_idx].buf_len ||
602 buf_vec[vec_idx].buf_len < dev->vhost_hlen ||
609 hdr_addr = desc_addr;
610 hdr_phys_addr = buf_vec[vec_idx].buf_addr;
611 rte_prefetch0((void *)(uintptr_t)hdr_addr);
613 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
614 dev->vid, num_buffers);
616 desc_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
617 desc_offset = dev->vhost_hlen;
619 mbuf_avail = rte_pktmbuf_data_len(m);
621 while (mbuf_avail != 0 || m->next != NULL) {
622 /* done with current desc buf, get the next one */
623 if (desc_avail == 0) {
625 dlen = buf_vec[vec_idx].buf_len;
627 vhost_iova_to_vva(dev, vq,
628 buf_vec[vec_idx].buf_addr,
631 if (unlikely(!desc_addr ||
632 dlen != buf_vec[vec_idx].buf_len)) {
637 /* Prefetch buffer address. */
638 rte_prefetch0((void *)(uintptr_t)desc_addr);
640 desc_avail = buf_vec[vec_idx].buf_len;
643 /* done with current mbuf, get the next one */
644 if (mbuf_avail == 0) {
648 mbuf_avail = rte_pktmbuf_data_len(m);
652 struct virtio_net_hdr_mrg_rxbuf *hdr;
654 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)
656 virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
657 ASSIGN_UNLESS_EQUAL(hdr->num_buffers, num_buffers);
659 vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen);
660 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
666 cpy_len = RTE_MIN(desc_avail, mbuf_avail);
668 if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) {
669 rte_memcpy((void *)((uintptr_t)(desc_addr +
671 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
674 buf_vec[vec_idx].buf_addr + desc_offset,
676 PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
679 batch_copy[copy_nb].dst =
680 (void *)((uintptr_t)(desc_addr + desc_offset));
681 batch_copy[copy_nb].src =
682 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
683 batch_copy[copy_nb].log_addr =
684 buf_vec[vec_idx].buf_addr + desc_offset;
685 batch_copy[copy_nb].len = cpy_len;
689 mbuf_avail -= cpy_len;
690 mbuf_offset += cpy_len;
691 desc_avail -= cpy_len;
692 desc_offset += cpy_len;
696 vq->batch_copy_nb_elems = copy_nb;
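/*
 * In the mergeable-buffers case a single packet may span several
 * descriptor chains ("num_buffers" of them, as reserved by
 * reserve_avail_buf_mergeable()). The virtio_net_hdr_mrg_rxbuf header is
 * written into the first buffer only, with hdr->num_buffers telling the
 * guest how many used-ring entries belong to this packet; the payload is
 * then scattered across the remaining buffer space, again using the
 * batched-copy mechanism for small copies.
 */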
701 static __rte_always_inline uint32_t
702 virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
703 struct rte_mbuf **pkts, uint32_t count)
705 struct vhost_virtqueue *vq;
706 uint32_t pkt_idx = 0;
707 uint16_t num_buffers;
708 struct buf_vector buf_vec[BUF_VECTOR_MAX];
711 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
712 if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
713 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
714 dev->vid, __func__, queue_id);
718 vq = dev->virtqueue[queue_id];
720 rte_spinlock_lock(&vq->access_lock);
722 if (unlikely(vq->enabled == 0))
723 goto out_access_unlock;
725 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
726 vhost_user_iotlb_rd_lock(vq);
728 if (unlikely(vq->access_ok == 0))
729 if (unlikely(vring_translate(dev, vq) < 0))
732 count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
736 vq->batch_copy_nb_elems = 0;
738 rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
740 vq->shadow_used_idx = 0;
741 avail_head = *((volatile uint16_t *)&vq->avail->idx);
742 for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
743 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
745 if (unlikely(reserve_avail_buf_mergeable(dev, vq,
746 pkt_len, buf_vec, &num_buffers,
748 VHOST_LOG_DEBUG(VHOST_DATA,
749 "(%d) failed to get enough desc from vring\n",
751 vq->shadow_used_idx -= num_buffers;
755 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
756 dev->vid, vq->last_avail_idx,
757 vq->last_avail_idx + num_buffers);
759 if (copy_mbuf_to_desc_mergeable(dev, vq, pkts[pkt_idx],
760 buf_vec, num_buffers) < 0) {
761 vq->shadow_used_idx -= num_buffers;
765 vq->last_avail_idx += num_buffers;
768 do_data_copy_enqueue(dev, vq);
770 if (likely(vq->shadow_used_idx)) {
771 flush_shadow_used_ring(dev, vq);
772 vhost_vring_call(dev, vq);
776 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
777 vhost_user_iotlb_rd_unlock(vq);
780 rte_spinlock_unlock(&vq->access_lock);
786 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
787 struct rte_mbuf **pkts, uint16_t count)
789 struct virtio_net *dev = get_device(vid);
794 if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
795 RTE_LOG(ERR, VHOST_DATA,
796 "(%d) %s: built-in vhost net backend is disabled.\n",
801 if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
802 return virtio_dev_merge_rx(dev, queue_id, pkts, count);
804 return virtio_dev_rx(dev, queue_id, pkts, count);
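/*
 * Typical usage of rte_vhost_enqueue_burst() (a minimal sketch, not part
 * of this file): a forwarding application might push packets received
 * from a physical port into a vhost device as shown below. The port id,
 * the vhost "vid" and the queue numbers are application-defined
 * assumptions.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx, nb_enq;
 *
 *	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	nb_enq = rte_vhost_enqueue_burst(vid, 0, pkts, nb_rx);
 *
 * The data is copied into guest memory, so the caller keeps ownership of
 * the mbufs and frees all of them afterwards, regardless of nb_enq:
 *
 *	while (nb_rx > 0)
 *		rte_pktmbuf_free(pkts[--nb_rx]);
 */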
808 virtio_net_with_host_offload(struct virtio_net *dev)
811 ((1ULL << VIRTIO_NET_F_CSUM) |
812 (1ULL << VIRTIO_NET_F_HOST_ECN) |
813 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
814 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
815 (1ULL << VIRTIO_NET_F_HOST_UFO)))
822 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
824 struct ipv4_hdr *ipv4_hdr;
825 struct ipv6_hdr *ipv6_hdr;
827 struct ether_hdr *eth_hdr;
830 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
832 m->l2_len = sizeof(struct ether_hdr);
833 ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
835 if (ethertype == ETHER_TYPE_VLAN) {
836 struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
838 m->l2_len += sizeof(struct vlan_hdr);
839 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
842 l3_hdr = (char *)eth_hdr + m->l2_len;
845 case ETHER_TYPE_IPv4:
847 *l4_proto = ipv4_hdr->next_proto_id;
848 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
849 *l4_hdr = (char *)l3_hdr + m->l3_len;
850 m->ol_flags |= PKT_TX_IPV4;
852 case ETHER_TYPE_IPv6:
854 *l4_proto = ipv6_hdr->proto;
855 m->l3_len = sizeof(struct ipv6_hdr);
856 *l4_hdr = (char *)l3_hdr + m->l3_len;
857 m->ol_flags |= PKT_TX_IPV6;
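/*
 * parse_ethernet() only fills in m->l2_len/l3_len (and the l4 pointer);
 * those lengths are what vhost_dequeue_offload() checks csum_start
 * against. For example, a VLAN-tagged IPv4/TCP frame yields
 * l2_len = 14 + 4 = 18 and l3_len = IHL * 4 (20 bytes without options),
 * so csum_start is expected to be 38 in that case.
 */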
867 static __rte_always_inline void
868 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
870 uint16_t l4_proto = 0;
872 struct tcp_hdr *tcp_hdr = NULL;
874 if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
877 parse_ethernet(m, &l4_proto, &l4_hdr);
878 if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
879 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
880 switch (hdr->csum_offset) {
881 case (offsetof(struct tcp_hdr, cksum)):
882 if (l4_proto == IPPROTO_TCP)
883 m->ol_flags |= PKT_TX_TCP_CKSUM;
885 case (offsetof(struct udp_hdr, dgram_cksum)):
886 if (l4_proto == IPPROTO_UDP)
887 m->ol_flags |= PKT_TX_UDP_CKSUM;
889 case (offsetof(struct sctp_hdr, cksum)):
890 if (l4_proto == IPPROTO_SCTP)
891 m->ol_flags |= PKT_TX_SCTP_CKSUM;
899 if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
900 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
901 case VIRTIO_NET_HDR_GSO_TCPV4:
902 case VIRTIO_NET_HDR_GSO_TCPV6:
904 m->ol_flags |= PKT_TX_TCP_SEG;
905 m->tso_segsz = hdr->gso_size;
906 m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
908 case VIRTIO_NET_HDR_GSO_UDP:
909 m->ol_flags |= PKT_TX_UDP_SEG;
910 m->tso_segsz = hdr->gso_size;
911 m->l4_len = sizeof(struct udp_hdr);
914 RTE_LOG(WARNING, VHOST_DATA,
915 "unsupported gso type %u.\n", hdr->gso_type);
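/*
 * Note on the l4_len computation above: tcp_hdr->data_off keeps the TCP
 * header length, in 32-bit words, in its upper 4 bits, so
 * (data_off & 0xf0) >> 4 gives the length in words and ">> 2" directly
 * yields the length in bytes (e.g. 0x50 -> 20 bytes for a header without
 * options). For UDP the header length is fixed, hence
 * sizeof(struct udp_hdr).
 */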
921 static __rte_always_inline void
922 put_zmbuf(struct zcopy_mbuf *zmbuf)
927 static __rte_always_inline int
928 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
929 struct vring_desc *descs, uint16_t max_desc,
930 struct rte_mbuf *m, uint16_t desc_idx,
931 struct rte_mempool *mbuf_pool)
933 struct vring_desc *desc;
935 uint32_t desc_avail, desc_offset;
936 uint32_t mbuf_avail, mbuf_offset;
939 struct rte_mbuf *cur = m, *prev = m;
940 struct virtio_net_hdr *hdr = NULL;
941 /* A counter to avoid an endless loop over the desc chain */
942 uint32_t nr_desc = 1;
943 struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
944 uint16_t copy_nb = vq->batch_copy_nb_elems;
947 desc = &descs[desc_idx];
948 if (unlikely((desc->len < dev->vhost_hlen)) ||
949 (desc->flags & VRING_DESC_F_INDIRECT)) {
955 desc_addr = vhost_iova_to_vva(dev,
959 if (unlikely(!desc_addr || dlen != desc->len)) {
964 if (virtio_net_with_host_offload(dev)) {
965 hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
970 * A virtio driver normally uses at least 2 desc buffers
971 * for Tx: the first for storing the header, and the others
972 * for storing the data.
974 if (likely((desc->len == dev->vhost_hlen) &&
975 (desc->flags & VRING_DESC_F_NEXT) != 0)) {
976 desc = &descs[desc->next];
977 if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) {
983 desc_addr = vhost_iova_to_vva(dev,
987 if (unlikely(!desc_addr || dlen != desc->len)) {
993 desc_avail = desc->len;
996 desc_avail = desc->len - dev->vhost_hlen;
997 desc_offset = dev->vhost_hlen;
1000 rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset));
1002 PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), desc_avail, 0);
1005 mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
1009 cpy_len = RTE_MIN(desc_avail, mbuf_avail);
1012 * A desc buf might span two host physical pages that are
1013 * not contiguous. In that case (gpa_to_hpa returns 0), data
1014 * will be copied even though zero copy is enabled.
1016 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1017 desc->addr + desc_offset, cpy_len)))) {
1018 cur->data_len = cpy_len;
1020 cur->buf_addr = (void *)(uintptr_t)(desc_addr
1022 cur->buf_iova = hpa;
1025 * In zero copy mode, one mbuf can only reference one desc
1026 * buf, or part of one.
1028 mbuf_avail = cpy_len;
1030 if (likely(cpy_len > MAX_BATCH_LEN ||
1031 copy_nb >= vq->size ||
1032 (hdr && cur == m))) {
1033 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1035 (void *)((uintptr_t)(desc_addr +
1039 batch_copy[copy_nb].dst =
1040 rte_pktmbuf_mtod_offset(cur, void *,
1042 batch_copy[copy_nb].src =
1043 (void *)((uintptr_t)(desc_addr +
1045 batch_copy[copy_nb].len = cpy_len;
1050 mbuf_avail -= cpy_len;
1051 mbuf_offset += cpy_len;
1052 desc_avail -= cpy_len;
1053 desc_offset += cpy_len;
1055 /* This desc buf is exhausted, get the next one */
1056 if (desc_avail == 0) {
1057 if ((desc->flags & VRING_DESC_F_NEXT) == 0)
1060 if (unlikely(desc->next >= max_desc ||
1061 ++nr_desc > max_desc)) {
1065 desc = &descs[desc->next];
1066 if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) {
1072 desc_addr = vhost_iova_to_vva(dev,
1074 &dlen, VHOST_ACCESS_RO);
1075 if (unlikely(!desc_addr || dlen != desc->len)) {
1080 rte_prefetch0((void *)(uintptr_t)desc_addr);
1083 desc_avail = desc->len;
1085 PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
1089 * This mbuf is full, allocate a new one
1090 * to hold more data.
1092 if (mbuf_avail == 0) {
1093 cur = rte_pktmbuf_alloc(mbuf_pool);
1094 if (unlikely(cur == NULL)) {
1095 RTE_LOG(ERR, VHOST_DATA, "Failed to "
1096 "allocate memory for mbuf.\n");
1100 if (unlikely(dev->dequeue_zero_copy))
1101 rte_mbuf_refcnt_update(cur, 1);
1104 prev->data_len = mbuf_offset;
1106 m->pkt_len += mbuf_offset;
1110 mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1114 prev->data_len = mbuf_offset;
1115 m->pkt_len += mbuf_offset;
1118 vhost_dequeue_offload(hdr, m);
1121 vq->batch_copy_nb_elems = copy_nb;
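/*
 * copy_desc_to_mbuf() is the dequeue counterpart of copy_mbuf_to_desc():
 * it walks one descriptor chain and fills an mbuf chain, allocating extra
 * mbufs from mbuf_pool as needed. When dequeue zero-copy is enabled and
 * the descriptor buffer maps to one contiguous host-physical region
 * (gpa_to_hpa() succeeds), the mbuf is made to point straight into guest
 * memory instead of copying; otherwise the data is copied as usual. If
 * host offloads are negotiated, the virtio_net_hdr is parsed at the end
 * via vhost_dequeue_offload().
 */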
1126 static __rte_always_inline void
1127 update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
1128 uint32_t used_idx, uint32_t desc_idx)
1130 vq->used->ring[used_idx].id = desc_idx;
1131 vq->used->ring[used_idx].len = 0;
1132 vhost_log_used_vring(dev, vq,
1133 offsetof(struct vring_used, ring[used_idx]),
1134 sizeof(vq->used->ring[used_idx]));
1137 static __rte_always_inline void
1138 update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
1141 if (unlikely(count == 0))
1147 vq->used->idx += count;
1148 vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
1149 sizeof(vq->used->idx));
1150 vhost_vring_call(dev, vq);
1153 static __rte_always_inline struct zcopy_mbuf *
1154 get_zmbuf(struct vhost_virtqueue *vq)
1160 /* search [last_zmbuf_idx, zmbuf_size) */
1161 i = vq->last_zmbuf_idx;
1162 last = vq->zmbuf_size;
1165 for (; i < last; i++) {
1166 if (vq->zmbufs[i].in_use == 0) {
1167 vq->last_zmbuf_idx = i + 1;
1168 vq->zmbufs[i].in_use = 1;
1169 return &vq->zmbufs[i];
1175 /* search [0, last_zmbuf_idx) */
1177 last = vq->last_zmbuf_idx;
1184 static __rte_always_inline bool
1185 mbuf_is_consumed(struct rte_mbuf *m)
1188 if (rte_mbuf_refcnt_read(m) > 1)
1196 static __rte_always_inline void
1197 restore_mbuf(struct rte_mbuf *m)
1199 uint32_t mbuf_size, priv_size;
1202 priv_size = rte_pktmbuf_priv_size(m->pool);
1203 mbuf_size = sizeof(struct rte_mbuf) + priv_size;
1204 /* start of buffer is after mbuf structure and priv data */
1206 m->buf_addr = (char *)m + mbuf_size;
1207 m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
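/*
 * restore_mbuf() is needed because the zero-copy path above rewrote
 * buf_addr and buf_iova to point into guest memory. Before the mbuf goes
 * back to its mempool, those fields must again describe the mbuf's own
 * data buffer, which lives right after the rte_mbuf structure and its
 * private area.
 */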
1213 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
1214 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1216 struct virtio_net *dev;
1217 struct rte_mbuf *rarp_mbuf = NULL;
1218 struct vhost_virtqueue *vq;
1219 uint32_t desc_indexes[MAX_PKT_BURST];
1222 uint16_t free_entries;
1225 dev = get_device(vid);
1229 if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1230 RTE_LOG(ERR, VHOST_DATA,
1231 "(%d) %s: built-in vhost net backend is disabled.\n",
1232 dev->vid, __func__);
1236 if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
1237 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1238 dev->vid, __func__, queue_id);
1242 vq = dev->virtqueue[queue_id];
1244 if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
1247 if (unlikely(vq->enabled == 0))
1248 goto out_access_unlock;
1250 vq->batch_copy_nb_elems = 0;
1252 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1253 vhost_user_iotlb_rd_lock(vq);
1255 if (unlikely(vq->access_ok == 0))
1256 if (unlikely(vring_translate(dev, vq) < 0))
1259 if (unlikely(dev->dequeue_zero_copy)) {
1260 struct zcopy_mbuf *zmbuf, *next;
1263 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1264 zmbuf != NULL; zmbuf = next) {
1265 next = TAILQ_NEXT(zmbuf, next);
1267 if (mbuf_is_consumed(zmbuf->mbuf)) {
1268 used_idx = vq->last_used_idx++ & (vq->size - 1);
1269 update_used_ring(dev, vq, used_idx,
1273 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1274 restore_mbuf(zmbuf->mbuf);
1275 rte_pktmbuf_free(zmbuf->mbuf);
1281 update_used_idx(dev, vq, nr_updated);
1285 * Construct a RARP broadcast packet, and inject it into the "pkts"
1286 * array, so that it looks like the guest actually sent such a packet.
1288 * Check user_send_rarp() for more information.
1290 * broadcast_rarp shares a cacheline in the virtio_net structure
1291 * with some fields that are accessed during enqueue, and
1292 * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
1293 * result in false sharing between enqueue and dequeue.
1295 * Prevent unnecessary false sharing by reading broadcast_rarp first
1296 * and only performing cmpset if the read indicates it is likely to
1300 if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
1301 rte_atomic16_cmpset((volatile uint16_t *)
1302 &dev->broadcast_rarp.cnt, 1, 0))) {
1304 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
1305 if (rarp_mbuf == NULL) {
1306 RTE_LOG(ERR, VHOST_DATA,
1307 "Failed to make RARP packet.\n");
1313 free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1315 if (free_entries == 0)
1318 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1320 /* Prefetch available and used ring */
1321 avail_idx = vq->last_avail_idx & (vq->size - 1);
1322 used_idx = vq->last_used_idx & (vq->size - 1);
1323 rte_prefetch0(&vq->avail->ring[avail_idx]);
1324 rte_prefetch0(&vq->used->ring[used_idx]);
1326 count = RTE_MIN(count, MAX_PKT_BURST);
1327 count = RTE_MIN(count, free_entries);
1328 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1331 /* Retrieve all of the head indexes first to avoid caching issues. */
1332 for (i = 0; i < count; i++) {
1333 avail_idx = (vq->last_avail_idx + i) & (vq->size - 1);
1334 used_idx = (vq->last_used_idx + i) & (vq->size - 1);
1335 desc_indexes[i] = vq->avail->ring[avail_idx];
1337 if (likely(dev->dequeue_zero_copy == 0))
1338 update_used_ring(dev, vq, used_idx, desc_indexes[i]);
1341 /* Prefetch the first descriptor. */
1342 rte_prefetch0(&vq->desc[desc_indexes[0]]);
1343 for (i = 0; i < count; i++) {
1344 struct vring_desc *desc, *idesc = NULL;
1349 if (likely(i + 1 < count))
1350 rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
1352 if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
1353 dlen = vq->desc[desc_indexes[i]].len;
1354 desc = (struct vring_desc *)(uintptr_t)
1355 vhost_iova_to_vva(dev, vq,
1356 vq->desc[desc_indexes[i]].addr,
1359 if (unlikely(!desc))
1362 if (unlikely(dlen < vq->desc[desc_indexes[i]].len)) {
1364 * The indirect desc table is not contiguous
1365 * in process VA space, so we have to copy it.
1367 idesc = alloc_copy_ind_table(dev, vq,
1368 &vq->desc[desc_indexes[i]]);
1369 if (unlikely(!idesc))
1375 rte_prefetch0(desc);
1376 sz = vq->desc[desc_indexes[i]].len / sizeof(*desc);
1381 idx = desc_indexes[i];
1384 pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
1385 if (unlikely(pkts[i] == NULL)) {
1386 RTE_LOG(ERR, VHOST_DATA,
1387 "Failed to allocate memory for mbuf.\n");
1388 free_ind_table(idesc);
1392 err = copy_desc_to_mbuf(dev, vq, desc, sz, pkts[i], idx,
1394 if (unlikely(err)) {
1395 rte_pktmbuf_free(pkts[i]);
1396 free_ind_table(idesc);
1400 if (unlikely(dev->dequeue_zero_copy)) {
1401 struct zcopy_mbuf *zmbuf;
1403 zmbuf = get_zmbuf(vq);
1405 rte_pktmbuf_free(pkts[i]);
1406 free_ind_table(idesc);
1409 zmbuf->mbuf = pkts[i];
1410 zmbuf->desc_idx = desc_indexes[i];
1413 * Pin the mbuf with an extra reference; we will check
1414 * later whether the application has freed it (i.e. we
1415 * are the last user). If so, we can then
1416 * update the used ring safely.
1418 rte_mbuf_refcnt_update(pkts[i], 1);
1421 TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1424 if (unlikely(!!idesc))
1425 free_ind_table(idesc);
1427 vq->last_avail_idx += i;
1429 if (likely(dev->dequeue_zero_copy == 0)) {
1430 do_data_copy_dequeue(vq);
1431 vq->last_used_idx += i;
1432 update_used_idx(dev, vq, i);
1436 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1437 vhost_user_iotlb_rd_unlock(vq);
1440 rte_spinlock_unlock(&vq->access_lock);
1442 if (unlikely(rarp_mbuf != NULL)) {
1444 * Inject it at the head of the "pkts" array, so that the switch's MAC
1445 * learning table gets updated first.
1447 memmove(&pkts[1], pkts, i * sizeof(struct rte_mbuf *));
1448 pkts[0] = rarp_mbuf;