/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_vhost.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_arp.h>

#include "vhost.h"
#define MAX_PKT_BURST 32

#define MAX_BATCH_LEN 256

static bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
	return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}
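
/*
 * The shadow used ring collects used-ring updates for a whole burst and
 * writes them back to the guest-visible used ring in one or two copies
 * (two when the write wraps around the end of the ring), instead of
 * updating the used ring once per packet.
 */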
static __rte_always_inline void
do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			  uint16_t to, uint16_t from, uint16_t size)
{
	rte_memcpy(&vq->used->ring[to],
			&vq->shadow_used_ring[from],
			size * sizeof(struct vring_used_elem));
	vhost_log_used_vring(dev, vq,
			offsetof(struct vring_used, ring[to]),
			size * sizeof(struct vring_used_elem));
}
static __rte_always_inline void
flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

	if (used_idx + vq->shadow_used_idx <= vq->size) {
		do_flush_shadow_used_ring(dev, vq, used_idx, 0,
					  vq->shadow_used_idx);
	} else {
		uint16_t size;

		/* update used ring interval [used_idx, vq->size] */
		size = vq->size - used_idx;
		do_flush_shadow_used_ring(dev, vq, used_idx, 0, size);

		/* update the left half used ring interval [0, left_size] */
		do_flush_shadow_used_ring(dev, vq, 0, size,
					  vq->shadow_used_idx - size);
	}
	vq->last_used_idx += vq->shadow_used_idx;

	rte_smp_wmb();

	*(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
	vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
		sizeof(vq->used->idx));
}
static __rte_always_inline void
update_shadow_used_ring(struct vhost_virtqueue *vq,
			 uint16_t desc_idx, uint16_t len)
{
	uint16_t i = vq->shadow_used_idx++;

	vq->shadow_used_ring[i].id  = desc_idx;
	vq->shadow_used_ring[i].len = len;
}
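
/*
 * Small copies are queued in vq->batch_copy_elems while a burst is being
 * processed and are performed here in one pass; copies larger than
 * MAX_BATCH_LEN are done inline by the callers.
 */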
static inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++) {
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
		vhost_log_write(dev, elem[i].log_addr, elem[i].len);
		PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
	}
}
static inline void
do_data_copy_dequeue(struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++)
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
}
/* avoid write operation when necessary, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	if ((var) != (val))			\
		(var) = (val);			\
} while (0)
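
/*
 * Translate the offload requests carried in the mbuf (m_buf->ol_flags)
 * into the virtio_net_hdr seen by the guest: L4 checksum offload, TSO,
 * and the software IPv4 header checksum.
 */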
static void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
	uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;

	if (m_buf->ol_flags & PKT_TX_TCP_SEG)
		csum_l4 |= PKT_TX_TCP_CKSUM;

	if (csum_l4) {
		net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;

		switch (csum_l4) {
		case PKT_TX_TCP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct tcp_hdr, cksum));
			break;
		case PKT_TX_UDP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct udp_hdr, dgram_cksum));
			break;
		case PKT_TX_SCTP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct sctp_hdr, cksum));
			break;
		}
	} else {
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
	}

	/* IP checksum cannot be requested via the virtio header, so compute it here */
	if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
		struct ipv4_hdr *ipv4_hdr;

		ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
						   m_buf->l2_len);
		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	}

	if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
		if (m_buf->ol_flags & PKT_TX_IPV4)
			net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else
			net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		net_hdr->gso_size = m_buf->tso_segsz;
		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
					+ m_buf->l4_len;
	} else {
		ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
	}
}
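
/*
 * Copy one mbuf chain into one guest descriptor chain: the virtio header
 * is written first, then the packet data, walking both chains until the
 * whole mbuf has been copied or the descriptor chain runs out of room.
 */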
static __rte_always_inline int
copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
		  struct vring_desc *descs, struct rte_mbuf *m,
		  uint16_t desc_idx, uint32_t size)
{
	uint32_t desc_avail, desc_offset;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct vring_desc *desc;
	uint64_t desc_addr;
	/* A counter to avoid an endless loop on a malformed desc chain */
	uint16_t nr_desc = 1;
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
	uint16_t copy_nb = vq->batch_copy_nb_elems;
	int error = 0;

	desc = &descs[desc_idx];
	desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
	/*
	 * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
	 * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
	 * otherwise stores offset on the stack instead of in a register.
	 */
	if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr) {
		error = -1;
		goto out;
	}

	rte_prefetch0((void *)(uintptr_t)desc_addr);

	virtio_enqueue_offload(m, (struct virtio_net_hdr *)(uintptr_t)desc_addr);
	vhost_log_write(dev, desc->addr, dev->vhost_hlen);
	PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);

	desc_offset = dev->vhost_hlen;
	desc_avail  = desc->len - dev->vhost_hlen;

	mbuf_avail  = rte_pktmbuf_data_len(m);
	mbuf_offset = 0;
	while (mbuf_avail != 0 || m->next != NULL) {
		/* done with current mbuf, fetch next */
		if (mbuf_avail == 0) {
			m = m->next;

			mbuf_offset = 0;
			mbuf_avail  = rte_pktmbuf_data_len(m);
		}

		/* done with current desc buf, fetch next */
		if (desc_avail == 0) {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
				/* Not enough room in the vring buffer */
				error = -1;
				goto out;
			}
			if (unlikely(desc->next >= size || ++nr_desc > size)) {
				error = -1;
				goto out;
			}

			desc = &descs[desc->next];
			desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
			if (unlikely(!desc_addr)) {
				error = -1;
				goto out;
			}

			desc_offset = 0;
			desc_avail  = desc->len;
		}

		cpy_len = RTE_MIN(desc_avail, mbuf_avail);
		if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) {
			rte_memcpy((void *)((uintptr_t)(desc_addr +
							desc_offset)),
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
				cpy_len);
			vhost_log_write(dev, desc->addr + desc_offset, cpy_len);
			PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
				     cpy_len, 0);
		} else {
			batch_copy[copy_nb].dst =
				(void *)((uintptr_t)(desc_addr + desc_offset));
			batch_copy[copy_nb].src =
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
			batch_copy[copy_nb].log_addr = desc->addr + desc_offset;
			batch_copy[copy_nb].len = cpy_len;
			copy_nb++;
		}

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail  -= cpy_len;
		desc_offset += cpy_len;
	}

out:
	vq->batch_copy_nb_elems = copy_nb;

	return error;
}
/**
 * This function adds buffers to the virtio device's RX virtqueue. Buffers can
 * be received from the physical port or from another virtio device. A packet
 * count is returned to indicate the number of packets that were successfully
 * added to the RX queue. This function works when the mbuf is scattered, but
 * it doesn't support the mergeable feature.
 */
static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
	      struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_virtqueue *vq;
	uint16_t avail_idx, free_entries, start_idx;
	uint16_t desc_indexes[MAX_PKT_BURST];
	struct vring_desc *descs;
	uint16_t used_idx;
	uint32_t i, sz;

	LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];
	if (unlikely(vq->enabled == 0))
		return 0;

	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
	start_idx = vq->last_used_idx;
	free_entries = avail_idx - start_idx;
	count = RTE_MIN(count, free_entries);
	count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
	if (count == 0)
		return 0;

	LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
		dev->vid, start_idx, start_idx + count);

	vq->batch_copy_nb_elems = 0;

	/* Retrieve all of the desc indexes first to avoid caching issues. */
	rte_prefetch0(&vq->avail->ring[start_idx & (vq->size - 1)]);
	for (i = 0; i < count; i++) {
		used_idx = (start_idx + i) & (vq->size - 1);
		desc_indexes[i] = vq->avail->ring[used_idx];
		vq->used->ring[used_idx].id = desc_indexes[i];
		vq->used->ring[used_idx].len = pkts[i]->pkt_len +
					       dev->vhost_hlen;
		vhost_log_used_vring(dev, vq,
			offsetof(struct vring_used, ring[used_idx]),
			sizeof(vq->used->ring[used_idx]));
	}

	rte_prefetch0(&vq->desc[desc_indexes[0]]);
	for (i = 0; i < count; i++) {
		uint16_t desc_idx = desc_indexes[i];
		int err;

		if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
			descs = (struct vring_desc *)(uintptr_t)
				rte_vhost_gpa_to_vva(dev->mem,
						vq->desc[desc_idx].addr);
			if (unlikely(!descs)) {
				count = i;
				break;
			}

			sz = vq->desc[desc_idx].len / sizeof(*descs);
			desc_idx = 0;
		} else {
			descs = vq->desc;
			sz = vq->size;
		}

		err = copy_mbuf_to_desc(dev, vq, descs, pkts[i], desc_idx, sz);
		if (unlikely(err)) {
			count = i;
			break;
		}

		if (i + 1 < count)
			rte_prefetch0(&vq->desc[desc_indexes[i+1]]);
	}

	do_data_copy_enqueue(dev, vq);

	rte_smp_wmb();

	*(volatile uint16_t *)&vq->used->idx += count;
	vq->last_used_idx += count;
	vhost_log_used_vring(dev, vq,
		offsetof(struct vring_used, idx),
		sizeof(vq->used->idx));

	/* flush used->idx update before we read avail->flags. */
	rte_mb();

	/* Kick the guest if necessary. */
	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
			&& (vq->callfd >= 0))
		eventfd_write(vq->callfd, (eventfd_t)1);
	return count;
}
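
/*
 * Collect the descriptor chain starting at the given avail ring entry into
 * buf_vec[] entries (one per descriptor), following an indirect descriptor
 * table when present. Used by the mergeable receive path.
 */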
static __rte_always_inline int
fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
			 uint32_t avail_idx, uint32_t *vec_idx,
			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
			 uint16_t *desc_chain_len)
{
	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
	uint32_t vec_id = *vec_idx;
	uint32_t len    = 0;
	struct vring_desc *descs = vq->desc;

	*desc_chain_head = idx;

	if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
		descs = (struct vring_desc *)(uintptr_t)
			rte_vhost_gpa_to_vva(dev->mem, vq->desc[idx].addr);
		if (unlikely(!descs))
			return -1;

		idx = 0;
	}

	while (1) {
		if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
			return -1;

		len += descs[idx].len;
		buf_vec[vec_id].buf_addr = descs[idx].addr;
		buf_vec[vec_id].buf_len  = descs[idx].len;
		buf_vec[vec_id].desc_idx = idx;
		vec_id++;

		if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
			break;

		idx = descs[idx].next;
	}

	*desc_chain_len = len;
	*vec_idx = vec_id;

	return 0;
}
/*
 * Returns -1 on fail, 0 on success
 */
static inline int
reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint32_t size, struct buf_vector *buf_vec,
				uint16_t *num_buffers, uint16_t avail_head)
{
	uint16_t cur_idx;
	uint32_t vec_idx = 0;
	uint16_t tries = 0;

	uint16_t head_idx = 0;
	uint16_t len = 0;

	*num_buffers = 0;
	cur_idx  = vq->last_avail_idx;

	while (size > 0) {
		if (unlikely(cur_idx == avail_head))
			return -1;

		if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec,
						&head_idx, &len) < 0))
			return -1;
		len = RTE_MIN(len, size);
		update_shadow_used_ring(vq, head_idx, len);
		size -= len;

		cur_idx++;
		tries++;
		*num_buffers += 1;

		/*
		 * if we tried all available ring items, and still
		 * can't get enough buf, it means something abnormal
		 * happened.
		 */
		if (unlikely(tries >= vq->size))
			return -1;
	}

	return 0;
}
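
/*
 * Fill the buffers previously reserved in buf_vec[] with the mbuf data.
 * The mergeable header is written once, with num_buffers recording how
 * many descriptor chains the packet consumed.
 */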
static __rte_always_inline int
copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
			    struct rte_mbuf *m, struct buf_vector *buf_vec,
			    uint16_t num_buffers)
{
	uint32_t vec_idx = 0;
	uint64_t desc_addr;
	uint32_t mbuf_offset, mbuf_avail;
	uint32_t desc_offset, desc_avail;
	uint32_t cpy_len;
	uint64_t hdr_addr, hdr_phys_addr;
	struct rte_mbuf *hdr_mbuf;
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
	uint16_t copy_nb = vq->batch_copy_nb_elems;
	int error = 0;

	if (unlikely(m == NULL)) {
		error = -1;
		goto out;
	}

	desc_addr = rte_vhost_gpa_to_vva(dev->mem, buf_vec[vec_idx].buf_addr);
	if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr) {
		error = -1;
		goto out;
	}

	hdr_mbuf = m;
	hdr_addr = desc_addr;
	hdr_phys_addr = buf_vec[vec_idx].buf_addr;
	rte_prefetch0((void *)(uintptr_t)hdr_addr);

	LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
		dev->vid, num_buffers);

	desc_avail  = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
	desc_offset = dev->vhost_hlen;

	mbuf_avail  = rte_pktmbuf_data_len(m);
	mbuf_offset = 0;
	while (mbuf_avail != 0 || m->next != NULL) {
		/* done with current desc buf, get the next one */
		if (desc_avail == 0) {
			vec_idx++;
			desc_addr = rte_vhost_gpa_to_vva(dev->mem,
					buf_vec[vec_idx].buf_addr);
			if (unlikely(!desc_addr)) {
				error = -1;
				goto out;
			}

			/* Prefetch buffer address. */
			rte_prefetch0((void *)(uintptr_t)desc_addr);
			desc_offset = 0;
			desc_avail  = buf_vec[vec_idx].buf_len;
		}

		/* done with current mbuf, get the next one */
		if (mbuf_avail == 0) {
			m = m->next;

			mbuf_offset = 0;
			mbuf_avail  = rte_pktmbuf_data_len(m);
		}

		if (hdr_addr) {
			struct virtio_net_hdr_mrg_rxbuf *hdr;

			hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)
				hdr_addr;
			virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
			ASSIGN_UNLESS_EQUAL(hdr->num_buffers, num_buffers);

			vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen);
			PRINT_PACKET(dev, (uintptr_t)hdr_addr,
				     dev->vhost_hlen, 0);

			hdr_addr = 0;
		}

		cpy_len = RTE_MIN(desc_avail, mbuf_avail);

		if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) {
			rte_memcpy((void *)((uintptr_t)(desc_addr +
							desc_offset)),
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
				cpy_len);
			vhost_log_write(dev,
				buf_vec[vec_idx].buf_addr + desc_offset,
				cpy_len);
			PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
				cpy_len, 0);
		} else {
			batch_copy[copy_nb].dst =
				(void *)((uintptr_t)(desc_addr + desc_offset));
			batch_copy[copy_nb].src =
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
			batch_copy[copy_nb].log_addr =
				buf_vec[vec_idx].buf_addr + desc_offset;
			batch_copy[copy_nb].len = cpy_len;
			copy_nb++;
		}

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail  -= cpy_len;
		desc_offset += cpy_len;
	}

out:
	vq->batch_copy_nb_elems = copy_nb;

	return error;
}
static __rte_always_inline uint32_t
virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
	struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_virtqueue *vq;
	uint32_t pkt_idx = 0;
	uint16_t num_buffers;
	struct buf_vector buf_vec[BUF_VECTOR_MAX];
	uint16_t avail_head;

	LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];
	if (unlikely(vq->enabled == 0))
		return 0;

	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
	if (count == 0)
		return 0;

	vq->batch_copy_nb_elems = 0;

	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

	vq->shadow_used_idx = 0;
	avail_head = *((volatile uint16_t *)&vq->avail->idx);
	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;

		if (unlikely(reserve_avail_buf_mergeable(dev, vq,
						pkt_len, buf_vec, &num_buffers,
						avail_head) < 0)) {
			LOG_DEBUG(VHOST_DATA,
				"(%d) failed to get enough desc from vring\n",
				dev->vid);
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
			dev->vid, vq->last_avail_idx,
			vq->last_avail_idx + num_buffers);

		if (copy_mbuf_to_desc_mergeable(dev, vq, pkts[pkt_idx],
						buf_vec, num_buffers) < 0) {
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		vq->last_avail_idx += num_buffers;
	}

	do_data_copy_enqueue(dev, vq);

	if (likely(vq->shadow_used_idx)) {
		flush_shadow_used_ring(dev, vq);

		/* flush used->idx update before we read avail->flags. */
		rte_mb();

		/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0))
			eventfd_write(vq->callfd, (eventfd_t)1);
	}

	return pkt_idx;
}
uint16_t
rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
	struct rte_mbuf **pkts, uint16_t count)
{
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return 0;

	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
		return virtio_dev_merge_rx(dev, queue_id, pkts, count);
	else
		return virtio_dev_rx(dev, queue_id, pkts, count);
}
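
/*
 * Example usage (sketch): an application that has polled a burst of mbufs
 * from a NIC queue could hand them to the guest roughly as follows. The
 * names "port", "pkts" and VIRTIO_RXQ below are illustrative (VIRTIO_RXQ
 * is conventionally queue 0 in the example applications), not part of
 * this file:
 *
 *	nb_rx  = rte_eth_rx_burst(port, 0, pkts, MAX_PKT_BURST);
 *	nb_enq = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts, nb_rx);
 *	while (nb_enq < nb_rx)
 *		rte_pktmbuf_free(pkts[nb_enq++]);
 */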
static inline bool
virtio_net_with_host_offload(struct virtio_net *dev)
{
	if (dev->features &
			((1ULL << VIRTIO_NET_F_CSUM) |
			 (1ULL << VIRTIO_NET_F_HOST_ECN) |
			 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
			 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
			 (1ULL << VIRTIO_NET_F_HOST_UFO)))
		return true;

	return false;
}
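
/*
 * Parse the Ethernet (and optional VLAN) and IP headers of an mbuf to set
 * l2_len/l3_len and the IPV4/IPV6 offload flags, and to locate the L4
 * header for the checksum/GSO handling below.
 */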
static void
parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	void *l3_hdr = NULL;
	struct ether_hdr *eth_hdr;
	uint16_t ethertype;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	m->l2_len = sizeof(struct ether_hdr);
	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);

	if (ethertype == ETHER_TYPE_VLAN) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		m->l2_len += sizeof(struct vlan_hdr);
		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
	}

	l3_hdr = (char *)eth_hdr + m->l2_len;

	switch (ethertype) {
	case ETHER_TYPE_IPv4:
		ipv4_hdr = l3_hdr;
		*l4_proto = ipv4_hdr->next_proto_id;
		m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
		*l4_hdr = (char *)l3_hdr + m->l3_len;
		m->ol_flags |= PKT_TX_IPV4;
		break;
	case ETHER_TYPE_IPv6:
		ipv6_hdr = l3_hdr;
		*l4_proto = ipv6_hdr->proto;
		m->l3_len = sizeof(struct ipv6_hdr);
		*l4_hdr = (char *)l3_hdr + m->l3_len;
		m->ol_flags |= PKT_TX_IPV6;
		break;
	default:
		m->l3_len = 0;
		*l4_proto = 0;
		*l4_hdr = NULL;
		break;
	}
}
static __rte_always_inline void
vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
{
	uint16_t l4_proto = 0;
	void *l4_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;

	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
		return;

	parse_ethernet(m, &l4_proto, &l4_hdr);
	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
			switch (hdr->csum_offset) {
			case (offsetof(struct tcp_hdr, cksum)):
				if (l4_proto == IPPROTO_TCP)
					m->ol_flags |= PKT_TX_TCP_CKSUM;
				break;
			case (offsetof(struct udp_hdr, dgram_cksum)):
				if (l4_proto == IPPROTO_UDP)
					m->ol_flags |= PKT_TX_UDP_CKSUM;
				break;
			case (offsetof(struct sctp_hdr, cksum)):
				if (l4_proto == IPPROTO_SCTP)
					m->ol_flags |= PKT_TX_SCTP_CKSUM;
				break;
			default:
				break;
			}
		}
	}

	if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			tcp_hdr = l4_hdr;
			m->ol_flags |= PKT_TX_TCP_SEG;
			m->tso_segsz = hdr->gso_size;
			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
			break;
		default:
			RTE_LOG(WARNING, VHOST_DATA,
				"unsupported gso type %u.\n", hdr->gso_type);
			break;
		}
	}
}
#define RARP_PKT_SIZE	64

static int
make_rarp_packet(struct rte_mbuf *rarp_mbuf, const struct ether_addr *mac)
{
	struct ether_hdr *eth_hdr;
	struct arp_hdr  *rarp;

	if (rarp_mbuf->buf_len < 64) {
		RTE_LOG(WARNING, VHOST_DATA,
			"failed to make RARP; mbuf size too small %u (< %d)\n",
			rarp_mbuf->buf_len, RARP_PKT_SIZE);
		return -1;
	}

	/* Ethernet header. */
	eth_hdr = rte_pktmbuf_mtod_offset(rarp_mbuf, struct ether_hdr *, 0);
	memset(eth_hdr->d_addr.addr_bytes, 0xff, ETHER_ADDR_LEN);
	ether_addr_copy(mac, &eth_hdr->s_addr);
	eth_hdr->ether_type = htons(ETHER_TYPE_RARP);

	/* RARP header. */
	rarp = (struct arp_hdr *)(eth_hdr + 1);
	rarp->arp_hrd = htons(ARP_HRD_ETHER);
	rarp->arp_pro = htons(ETHER_TYPE_IPv4);
	rarp->arp_hln = ETHER_ADDR_LEN;
	rarp->arp_pln = 4;
	rarp->arp_op  = htons(ARP_OP_REVREQUEST);

	ether_addr_copy(mac, &rarp->arp_data.arp_sha);
	ether_addr_copy(mac, &rarp->arp_data.arp_tha);
	memset(&rarp->arp_data.arp_sip, 0x00, 4);
	memset(&rarp->arp_data.arp_tip, 0x00, 4);

	rarp_mbuf->pkt_len  = rarp_mbuf->data_len = RARP_PKT_SIZE;

	return 0;
}
static __rte_always_inline void
put_zmbuf(struct zcopy_mbuf *zmbuf)
{
	zmbuf->in_use = 0;
}
static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
		  struct vring_desc *descs, uint16_t max_desc,
		  struct rte_mbuf *m, uint16_t desc_idx,
		  struct rte_mempool *mbuf_pool)
{
	struct vring_desc *desc;
	uint64_t desc_addr;
	uint32_t desc_avail, desc_offset;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct rte_mbuf *cur = m, *prev = m;
	struct virtio_net_hdr *hdr = NULL;
	/* A counter to avoid an endless loop on a malformed desc chain */
	uint32_t nr_desc = 1;
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
	uint16_t copy_nb = vq->batch_copy_nb_elems;
	int error = 0;

	desc = &descs[desc_idx];
	if (unlikely((desc->len < dev->vhost_hlen)) ||
			(desc->flags & VRING_DESC_F_INDIRECT)) {
		error = -1;
		goto out;
	}

	desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
	if (unlikely(!desc_addr)) {
		error = -1;
		goto out;
	}

	if (virtio_net_with_host_offload(dev)) {
		hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
		rte_prefetch0(hdr);
	}

	/*
	 * A virtio driver normally uses at least 2 desc buffers
	 * for Tx: the first for storing the header, and others
	 * for storing the data.
	 */
	if (likely((desc->len == dev->vhost_hlen) &&
		   (desc->flags & VRING_DESC_F_NEXT) != 0)) {
		desc = &descs[desc->next];
		if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) {
			error = -1;
			goto out;
		}

		desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
		if (unlikely(!desc_addr)) {
			error = -1;
			goto out;
		}

		desc_offset = 0;
		desc_avail  = desc->len;
		nr_desc    += 1;
	} else {
		desc_avail  = desc->len - dev->vhost_hlen;
		desc_offset = dev->vhost_hlen;
	}

	rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset));

	PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), desc_avail, 0);

	mbuf_offset = 0;
	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
	while (1) {
		uint64_t hpa;

		cpy_len = RTE_MIN(desc_avail, mbuf_avail);

		/*
		 * A desc buf might span two host physical pages that are
		 * not contiguous. In that case (gpa_to_hpa returns 0), data
		 * will be copied even though zero copy is enabled.
		 */
		if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
					desc->addr + desc_offset, cpy_len)))) {
			cur->data_len = cpy_len;
			cur->data_off = 0;
			cur->buf_addr = (void *)(uintptr_t)desc_addr;
			cur->buf_physaddr = hpa;

			/*
			 * In zero-copy mode, one mbuf can only reference data
			 * from one desc buf, or part of it.
			 */
			mbuf_avail = cpy_len;
		} else {
			if (likely(cpy_len > MAX_BATCH_LEN ||
				   copy_nb >= vq->size)) {
				rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
								   mbuf_offset),
					(void *)((uintptr_t)(desc_addr +
							     desc_offset)),
					cpy_len);
			} else {
				batch_copy[copy_nb].dst =
					rte_pktmbuf_mtod_offset(cur, void *,
								mbuf_offset);
				batch_copy[copy_nb].src =
					(void *)((uintptr_t)(desc_addr +
							     desc_offset));
				batch_copy[copy_nb].len = cpy_len;
				copy_nb++;
			}
		}

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail  -= cpy_len;
		desc_offset += cpy_len;

		/* This desc buf is exhausted, get the next one */
		if (desc_avail == 0) {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0)
				break;

			if (unlikely(desc->next >= max_desc ||
				     ++nr_desc > max_desc)) {
				error = -1;
				goto out;
			}
			desc = &descs[desc->next];
			if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) {
				error = -1;
				goto out;
			}

			desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
			if (unlikely(!desc_addr)) {
				error = -1;
				goto out;
			}

			rte_prefetch0((void *)(uintptr_t)desc_addr);

			desc_offset = 0;
			desc_avail  = desc->len;

			PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
		}

		/*
		 * This mbuf is full, allocate a new one
		 * to hold more data.
		 */
		if (mbuf_avail == 0) {
			cur = rte_pktmbuf_alloc(mbuf_pool);
			if (unlikely(cur == NULL)) {
				RTE_LOG(ERR, VHOST_DATA, "Failed to "
					"allocate memory for mbuf.\n");
				error = -1;
				goto out;
			}
			if (unlikely(dev->dequeue_zero_copy))
				rte_mbuf_refcnt_update(cur, 1);

			prev->next = cur;
			prev->data_len = mbuf_offset;
			m->nb_segs += 1;
			m->pkt_len += mbuf_offset;
			prev = cur;

			mbuf_offset = 0;
			mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
		}
	}

	prev->data_len = mbuf_offset;
	m->pkt_len    += mbuf_offset;

	if (hdr)
		vhost_dequeue_offload(hdr, m);

out:
	vq->batch_copy_nb_elems = copy_nb;

	return error;
}
static __rte_always_inline void
update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
		 uint32_t used_idx, uint32_t desc_idx)
{
	vq->used->ring[used_idx].id  = desc_idx;
	vq->used->ring[used_idx].len = 0;
	vhost_log_used_vring(dev, vq,
			offsetof(struct vring_used, ring[used_idx]),
			sizeof(vq->used->ring[used_idx]));
}
static __rte_always_inline void
update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint32_t count)
{
	if (unlikely(count == 0))
		return;

	rte_smp_wmb();
	rte_smp_rmb();

	vq->used->idx += count;
	vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
			sizeof(vq->used->idx));

	/* Kick guest if required. */
	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
			&& (vq->callfd >= 0))
		eventfd_write(vq->callfd, (eventfd_t)1);
}
static __rte_always_inline struct zcopy_mbuf *
get_zmbuf(struct vhost_virtqueue *vq)
{
	uint16_t i;
	uint16_t last;
	int tries = 0;

	/* search [last_zmbuf_idx, zmbuf_size) */
	i = vq->last_zmbuf_idx;
	last = vq->zmbuf_size;

again:
	for (; i < last; i++) {
		if (vq->zmbufs[i].in_use == 0) {
			vq->last_zmbuf_idx = i + 1;
			vq->zmbufs[i].in_use = 1;
			return &vq->zmbufs[i];
		}
	}

	tries++;
	if (tries == 1) {
		/* search [0, last_zmbuf_idx) */
		i = 0;
		last = vq->last_zmbuf_idx;
		goto again;
	}

	return NULL;
}
static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
	while (m) {
		if (rte_mbuf_refcnt_read(m) > 1)
			return false;
		m = m->next;
	}

	return true;
}
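
/*
 * Dequeue a burst of packets from the guest TX virtqueue: recycle
 * zero-copy mbufs already consumed by the application, optionally inject
 * a RARP announcement, then copy (or map, in zero-copy mode) each
 * descriptor chain into a freshly allocated mbuf.
 */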
uint16_t
rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	struct virtio_net *dev;
	struct rte_mbuf *rarp_mbuf = NULL;
	struct vhost_virtqueue *vq;
	uint32_t desc_indexes[MAX_PKT_BURST];
	uint32_t used_idx;
	uint32_t i = 0;
	uint16_t free_entries;
	uint16_t avail_idx;

	dev = get_device(vid);
	if (!dev)
		return 0;

	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];
	if (unlikely(vq->enabled == 0))
		return 0;

	vq->batch_copy_nb_elems = 0;

	if (unlikely(dev->dequeue_zero_copy)) {
		struct zcopy_mbuf *zmbuf, *next;
		int nr_updated = 0;

		for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
		     zmbuf != NULL; zmbuf = next) {
			next = TAILQ_NEXT(zmbuf, next);

			if (mbuf_is_consumed(zmbuf->mbuf)) {
				used_idx = vq->last_used_idx++ & (vq->size - 1);
				update_used_ring(dev, vq, used_idx,
						 zmbuf->desc_idx);
				nr_updated += 1;

				TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
				rte_pktmbuf_free(zmbuf->mbuf);
				put_zmbuf(zmbuf);
				vq->nr_zmbuf -= 1;
			}
		}

		update_used_idx(dev, vq, nr_updated);
	}

	/*
	 * Construct a RARP broadcast packet and inject it into the "pkts"
	 * array, to make it look like the guest actually sent such a packet.
	 *
	 * Check user_send_rarp() for more information.
	 *
	 * broadcast_rarp shares a cacheline in the virtio_net structure
	 * with some fields that are accessed during enqueue, and
	 * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
	 * result in false sharing between enqueue and dequeue.
	 *
	 * Prevent unnecessary false sharing by reading broadcast_rarp first
	 * and only performing cmpset if the read indicates it is likely to
	 * be set.
	 */
	if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
			rte_atomic16_cmpset((volatile uint16_t *)
				&dev->broadcast_rarp.cnt, 1, 0))) {

		rarp_mbuf = rte_pktmbuf_alloc(mbuf_pool);
		if (rarp_mbuf == NULL) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to allocate memory for mbuf.\n");
			return 0;
		}

		if (make_rarp_packet(rarp_mbuf, &dev->mac)) {
			rte_pktmbuf_free(rarp_mbuf);
			rarp_mbuf = NULL;
		} else {
			count -= 1;
		}
	}

	free_entries = *((volatile uint16_t *)&vq->avail->idx) -
			vq->last_avail_idx;
	if (free_entries == 0)
		goto out;

	LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);

	/* Prefetch available and used ring */
	avail_idx = vq->last_avail_idx & (vq->size - 1);
	used_idx  = vq->last_used_idx  & (vq->size - 1);
	rte_prefetch0(&vq->avail->ring[avail_idx]);
	rte_prefetch0(&vq->used->ring[used_idx]);

	count = RTE_MIN(count, MAX_PKT_BURST);
	count = RTE_MIN(count, free_entries);
	LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
			dev->vid, count);

	/* Retrieve all of the head indexes first to avoid caching issues. */
	for (i = 0; i < count; i++) {
		avail_idx = (vq->last_avail_idx + i) & (vq->size - 1);
		used_idx  = (vq->last_used_idx  + i) & (vq->size - 1);
		desc_indexes[i] = vq->avail->ring[avail_idx];

		if (likely(dev->dequeue_zero_copy == 0))
			update_used_ring(dev, vq, used_idx, desc_indexes[i]);
	}

	/* Prefetch descriptor index. */
	rte_prefetch0(&vq->desc[desc_indexes[0]]);
	for (i = 0; i < count; i++) {
		struct vring_desc *desc;
		uint16_t sz, idx;
		int err;

		if (likely(i + 1 < count))
			rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);

		if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
			desc = (struct vring_desc *)(uintptr_t)
				rte_vhost_gpa_to_vva(dev->mem,
					vq->desc[desc_indexes[i]].addr);
			if (unlikely(!desc))
				break;

			rte_prefetch0(desc);
			sz = vq->desc[desc_indexes[i]].len / sizeof(*desc);
			idx = 0;
		} else {
			desc = vq->desc;
			sz = vq->size;
			idx = desc_indexes[i];
		}

		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to allocate memory for mbuf.\n");
			break;
		}

		err = copy_desc_to_mbuf(dev, vq, desc, sz, pkts[i], idx,
					mbuf_pool);
		if (unlikely(err)) {
			rte_pktmbuf_free(pkts[i]);
			break;
		}

		if (unlikely(dev->dequeue_zero_copy)) {
			struct zcopy_mbuf *zmbuf;

			zmbuf = get_zmbuf(vq);
			if (!zmbuf) {
				rte_pktmbuf_free(pkts[i]);
				break;
			}
			zmbuf->mbuf = pkts[i];
			zmbuf->desc_idx = desc_indexes[i];

			/*
			 * Pin the mbuf with an extra reference; later we check
			 * whether it has been freed by the application (i.e.
			 * we are the last user), in which case the used ring
			 * can be updated safely.
			 */
			rte_mbuf_refcnt_update(pkts[i], 1);

			vq->nr_zmbuf += 1;
			TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
		}
	}
	vq->last_avail_idx += i;

	if (likely(dev->dequeue_zero_copy == 0)) {
		do_data_copy_dequeue(vq);
		vq->last_used_idx += i;
		update_used_idx(dev, vq, i);
	}

out:
	if (unlikely(rarp_mbuf != NULL)) {
		/*
		 * Inject it at the head of the "pkts" array, so that the
		 * switch's MAC learning table gets updated first.
		 */
		memmove(&pkts[1], pkts, i * sizeof(struct rte_mbuf *));
		pkts[0] = rarp_mbuf;
		i += 1;
	}

	return i;
}