/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_vhost.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_arp.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#include "iotlb.h"
#include "vhost.h"

#define MAX_PKT_BURST 32

#define MAX_BATCH_LEN 256

static __rte_always_inline bool
rxvq_is_mergeable(struct virtio_net *dev)
{
	return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}

static __rte_always_inline bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
	return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}

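/*
 * Shadow used ring helpers: completed descriptors are first staged in a
 * per-virtqueue shadow array and flushed to the guest-visible used ring
 * in one burst, batching both the cache-line traffic and the dirty-page
 * logging needed for live migration.
 */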
static __rte_always_inline void
do_flush_shadow_used_ring_split(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			uint16_t to, uint16_t from, uint16_t size)
{
	rte_memcpy(&vq->used->ring[to],
			&vq->shadow_used_split[from],
			size * sizeof(struct vring_used_elem));
	vhost_log_cache_used_vring(dev, vq,
			offsetof(struct vring_used, ring[to]),
			size * sizeof(struct vring_used_elem));
}

static __rte_always_inline void
flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

	if (used_idx + vq->shadow_used_idx <= vq->size) {
		do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
					  vq->shadow_used_idx);
	} else {
		uint16_t size;

		/* update used ring interval [used_idx, vq->size] */
		size = vq->size - used_idx;
		do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);

		/* update the left half used ring interval [0, left_size] */
		do_flush_shadow_used_ring_split(dev, vq, 0, size,
					  vq->shadow_used_idx - size);
	}
	vq->last_used_idx += vq->shadow_used_idx;

	rte_smp_wmb();

	vhost_log_cache_sync(dev, vq);

	*(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
	vq->shadow_used_idx = 0;
	vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
		sizeof(vq->used->idx));
}

static __rte_always_inline void
update_shadow_used_ring_split(struct vhost_virtqueue *vq,
			 uint16_t desc_idx, uint32_t len)
{
	uint16_t i = vq->shadow_used_idx++;

	vq->shadow_used_split[i].id  = desc_idx;
	vq->shadow_used_split[i].len = len;
}

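/*
 * Packed ring flush: a descriptor is handed back to the driver by
 * rewriting its flags, whose AVAIL/USED bits must match the current
 * used wrap counter. The ids and lengths are written first, then a
 * write barrier, then the flags; the head descriptor's flags are
 * written last so the guest does not see the batch marked used before
 * its contents are in place.
 */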
static __rte_always_inline void
flush_shadow_used_ring_packed(struct virtio_net *dev,
			struct vhost_virtqueue *vq)
{
	int i;
	uint16_t used_idx = vq->last_used_idx;
	uint16_t head_idx = vq->last_used_idx;
	uint16_t head_flags = 0;

	/* Split loop in two to save memory barriers */
	for (i = 0; i < vq->shadow_used_idx; i++) {
		vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
		vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;

		used_idx += vq->shadow_used_packed[i].count;
		if (used_idx >= vq->size)
			used_idx -= vq->size;
	}

	rte_smp_wmb();

	for (i = 0; i < vq->shadow_used_idx; i++) {
		uint16_t flags;

		if (vq->shadow_used_packed[i].len)
			flags = VRING_DESC_F_WRITE;
		else
			flags = 0;

		if (vq->used_wrap_counter) {
			flags |= VRING_DESC_F_USED;
			flags |= VRING_DESC_F_AVAIL;
		} else {
			flags &= ~VRING_DESC_F_USED;
			flags &= ~VRING_DESC_F_AVAIL;
		}

		if (i > 0) {
			vq->desc_packed[vq->last_used_idx].flags = flags;

			vhost_log_cache_used_vring(dev, vq,
					vq->last_used_idx *
					sizeof(struct vring_packed_desc),
					sizeof(struct vring_packed_desc));
		} else {
			head_idx = vq->last_used_idx;
			head_flags = flags;
		}

		vq->last_used_idx += vq->shadow_used_packed[i].count;
		if (vq->last_used_idx >= vq->size) {
			vq->used_wrap_counter ^= 1;
			vq->last_used_idx -= vq->size;
		}
	}

	vq->desc_packed[head_idx].flags = head_flags;

	vhost_log_cache_used_vring(dev, vq,
				head_idx *
				sizeof(struct vring_packed_desc),
				sizeof(struct vring_packed_desc));

	vq->shadow_used_idx = 0;
	vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
			 uint16_t desc_idx, uint32_t len, uint16_t count)
{
	uint16_t i = vq->shadow_used_idx++;

	vq->shadow_used_packed[i].id  = desc_idx;
	vq->shadow_used_packed[i].len = len;
	vq->shadow_used_packed[i].count = count;
}

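/*
 * Copies of up to MAX_BATCH_LEN bytes are queued in
 * vq->batch_copy_elems and executed together by the helpers below to
 * amortize per-copy overhead; larger copies are performed inline at
 * the call site.
 */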
static __rte_always_inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++) {
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
		vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len);
		PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
	}

	vq->batch_copy_nb_elems = 0;
}

static __rte_always_inline void
do_data_copy_dequeue(struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++)
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);

	vq->batch_copy_nb_elems = 0;
}

/* avoid the write when it is unnecessary, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	if ((var) != (val))			\
		(var) = (val);			\
} while (0)

static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
	uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;

	if (m_buf->ol_flags & PKT_TX_TCP_SEG)
		csum_l4 |= PKT_TX_TCP_CKSUM;

	if (csum_l4) {
		net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;

		switch (csum_l4) {
		case PKT_TX_TCP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
						cksum));
			break;
		case PKT_TX_UDP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
						dgram_cksum));
			break;
		case PKT_TX_SCTP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
						cksum));
			break;
		}
	} else {
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
	}

	/* IP cksum offload cannot be passed to the guest, so calculate it here */
	if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr;

		ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
						   m_buf->l2_len);
		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	}

	if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
		if (m_buf->ol_flags & PKT_TX_IPV4)
			net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else
			net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		net_hdr->gso_size = m_buf->tso_segsz;
		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
					+ m_buf->l4_len;
	} else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
		net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		net_hdr->gso_size = m_buf->tso_segsz;
		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
			m_buf->l4_len;
	} else {
		ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
	}
}

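/*
 * Worked example for virtio_enqueue_offload() above: for a TCP/IPv4
 * packet with no VLAN tag and no IP options, l2_len = 14 and
 * l3_len = 20, so csum_start = 34 and
 * csum_offset = offsetof(struct rte_tcp_hdr, cksum) = 16; the consumer
 * of the virtio-net header then finishes the checksum at byte
 * 34 + 16 = 50 of the frame.
 */
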
static __rte_always_inline int
map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct buf_vector *buf_vec, uint16_t *vec_idx,
		uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
{
	uint16_t vec_id = *vec_idx;

	while (desc_len) {
		uint64_t desc_addr;
		uint64_t desc_chunck_len = desc_len;

		if (unlikely(vec_id >= BUF_VECTOR_MAX))
			return -1;

		desc_addr = vhost_iova_to_vva(dev, vq,
				desc_iova,
				&desc_chunck_len,
				perm);
		if (unlikely(!desc_addr))
			return -1;

		buf_vec[vec_id].buf_iova = desc_iova;
		buf_vec[vec_id].buf_addr = desc_addr;
		buf_vec[vec_id].buf_len  = desc_chunck_len;

		desc_len -= desc_chunck_len;
		desc_iova += desc_chunck_len;
		vec_id++;
	}
	*vec_idx = vec_id;

	return 0;
}

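/*
 * The fill_vec_buf_* helpers below walk a descriptor chain (following
 * VRING_DESC_F_NEXT and dereferencing VRING_DESC_F_INDIRECT tables)
 * and flatten it into an array of host-VA chunks via map_one_desc(),
 * since a buffer that is contiguous in guest-physical space may map to
 * several chunks of host virtual memory.
 */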
static __rte_always_inline int
fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
			 uint32_t avail_idx, uint16_t *vec_idx,
			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
			 uint32_t *desc_chain_len, uint8_t perm)
{
	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
	uint16_t vec_id = *vec_idx;
	uint32_t len    = 0;
	uint64_t dlen;
	uint32_t nr_descs = vq->size;
	uint32_t cnt    = 0;
	struct vring_desc *descs = vq->desc;
	struct vring_desc *idesc = NULL;

	if (unlikely(idx >= vq->size))
		return -1;

	*desc_chain_head = idx;

	if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
		dlen = vq->desc[idx].len;
		nr_descs = dlen / sizeof(struct vring_desc);
		if (unlikely(nr_descs > vq->size))
			return -1;

		descs = (struct vring_desc *)(uintptr_t)
			vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
						&dlen,
						VHOST_ACCESS_RO);
		if (unlikely(!descs))
			return -1;

		if (unlikely(dlen < vq->desc[idx].len)) {
			/*
			 * The indirect desc table is not contiguous
			 * in process VA space, we have to copy it.
			 */
			idesc = vhost_alloc_copy_ind_table(dev, vq,
					vq->desc[idx].addr, vq->desc[idx].len);
			if (unlikely(!idesc))
				return -1;

			descs = idesc;
		}

		idx = 0;
	}

	while (1) {
		if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
			free_ind_table(idesc);
			return -1;
		}

		len += descs[idx].len;

		if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
						descs[idx].addr, descs[idx].len,
						perm))) {
			free_ind_table(idesc);
			return -1;
		}

		if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
			break;

		idx = descs[idx].next;
	}

	*desc_chain_len = len;
	*vec_idx = vec_id;

	if (unlikely(!!idesc))
		free_ind_table(idesc);

	return 0;
}

/*
 * Returns -1 on fail, 0 on success
 */
static inline int
reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint32_t size, struct buf_vector *buf_vec,
				uint16_t *num_buffers, uint16_t avail_head,
				uint16_t *nr_vec)
{
	uint16_t cur_idx;
	uint16_t vec_idx = 0;
	uint16_t max_tries, tries = 0;

	uint16_t head_idx = 0;
	uint32_t len = 0;

	*num_buffers = 0;
	cur_idx  = vq->last_avail_idx;

	if (rxvq_is_mergeable(dev))
		max_tries = vq->size - 1;
	else
		max_tries = 1;

	while (size > 0) {
		if (unlikely(cur_idx == avail_head))
			return -1;
		/*
		 * If we have tried all available ring items and still
		 * can't get enough buffers, something abnormal has
		 * happened.
		 */
		if (unlikely(++tries > max_tries))
			return -1;

		if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
						&vec_idx, buf_vec,
						&head_idx, &len,
						VHOST_ACCESS_RW) < 0))
			return -1;
		len = RTE_MIN(len, size);
		update_shadow_used_ring_split(vq, head_idx, len);
		size -= len;

		cur_idx++;
		*num_buffers += 1;
	}

	*nr_vec = vec_idx;

	return 0;
}

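/*
 * Note on max_tries above: with VIRTIO_NET_F_MRG_RXBUF negotiated, one
 * packet may be scattered across up to vq->size - 1 buffers; without
 * it, the packet must fit into a single descriptor chain, so only one
 * try is allowed.
 */
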
static __rte_always_inline int
fill_vec_buf_packed_indirect(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			struct vring_packed_desc *desc, uint16_t *vec_idx,
			struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
{
	uint16_t i;
	uint32_t nr_descs;
	uint16_t vec_id = *vec_idx;
	uint64_t dlen;
	struct vring_packed_desc *descs, *idescs = NULL;

	dlen = desc->len;
	descs = (struct vring_packed_desc *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
	if (unlikely(!descs))
		return -1;

	if (unlikely(dlen < desc->len)) {
		/*
		 * The indirect desc table is not contiguous
		 * in process VA space, we have to copy it.
		 */
		idescs = vhost_alloc_copy_ind_table(dev,
				vq, desc->addr, desc->len);
		if (unlikely(!idescs))
			return -1;

		descs = idescs;
	}

	nr_descs =  desc->len / sizeof(struct vring_packed_desc);
	if (unlikely(nr_descs >= vq->size)) {
		free_ind_table(idescs);
		return -1;
	}

	for (i = 0; i < nr_descs; i++) {
		if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
			free_ind_table(idescs);
			return -1;
		}

		*len += descs[i].len;
		if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
						descs[i].addr, descs[i].len,
						perm)))
			return -1;
	}
	*vec_idx = vec_id;

	if (unlikely(!!idescs))
		free_ind_table(idescs);

	return 0;
}

static __rte_always_inline int
fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint16_t avail_idx, uint16_t *desc_count,
				struct buf_vector *buf_vec, uint16_t *vec_idx,
				uint16_t *buf_id, uint32_t *len, uint8_t perm)
{
	bool wrap_counter = vq->avail_wrap_counter;
	struct vring_packed_desc *descs = vq->desc_packed;
	uint16_t vec_id = *vec_idx;

	if (avail_idx < vq->last_avail_idx)
		wrap_counter ^= 1;

	if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
		return -1;

	/*
	 * The ordering between desc flags and desc
	 * content reads need to be enforced.
	 */
	rte_smp_rmb();

	*desc_count = 0;
	*len = 0;

	while (1) {
		if (unlikely(vec_id >= BUF_VECTOR_MAX))
			return -1;

		if (unlikely(*desc_count >= vq->size))
			return -1;

		*desc_count += 1;
		*buf_id = descs[avail_idx].id;

		if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
			if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
							&descs[avail_idx],
							&vec_id, buf_vec,
							len, perm) < 0))
				return -1;
		} else {
			*len += descs[avail_idx].len;

			if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
							descs[avail_idx].addr,
							descs[avail_idx].len,
							perm)))
				return -1;
		}

		if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
			break;

		if (++avail_idx >= vq->size) {
			avail_idx -= vq->size;
			wrap_counter ^= 1;
		}
	}

	*vec_idx = vec_id;

	return 0;
}

/*
 * Returns -1 on fail, 0 on success
 */
static inline int
reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint32_t size, struct buf_vector *buf_vec,
				uint16_t *nr_vec, uint16_t *num_buffers,
				uint16_t *nr_descs)
{
	uint16_t avail_idx;
	uint16_t vec_idx = 0;
	uint16_t max_tries, tries = 0;

	uint16_t buf_id = 0;
	uint32_t len = 0;
	uint16_t desc_count;

	*num_buffers = 0;
	avail_idx = vq->last_avail_idx;

	if (rxvq_is_mergeable(dev))
		max_tries = vq->size - 1;
	else
		max_tries = 1;

	while (size > 0) {
		/*
		 * If we have tried all available ring items and still
		 * can't get enough buffers, something abnormal has
		 * happened.
		 */
		if (unlikely(++tries > max_tries))
			return -1;

		if (unlikely(fill_vec_buf_packed(dev, vq,
						avail_idx, &desc_count,
						buf_vec, &vec_idx,
						&buf_id, &len,
						VHOST_ACCESS_RW) < 0))
			return -1;

		len = RTE_MIN(len, size);
		update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
		size -= len;

		avail_idx += desc_count;
		if (avail_idx >= vq->size)
			avail_idx -= vq->size;

		*nr_descs += desc_count;
		*num_buffers += 1;
	}

	*nr_vec = vec_idx;

	return 0;
}

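/*
 * Slow path for writing the virtio-net header: used when the header
 * does not fit in the first host-VA chunk and must be scattered over
 * several buf_vector entries.
 */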
static __rte_noinline void
copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct buf_vector *buf_vec,
		struct virtio_net_hdr_mrg_rxbuf *hdr)
{
	uint64_t len;
	uint64_t remain = dev->vhost_hlen;
	uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
	uint64_t iova = buf_vec->buf_iova;

	while (remain) {
		len = RTE_MIN(remain,
				buf_vec->buf_len);
		dst = buf_vec->buf_addr;
		rte_memcpy((void *)(uintptr_t)dst,
				(void *)(uintptr_t)src,
				len);

		PRINT_PACKET(dev, (uintptr_t)dst,
				(uint32_t)len, 0);
		vhost_log_cache_write(dev, vq,
				iova, len);

		remain -= len;
		iova += len;
		src += len;
		buf_vec++;
	}
}

static __rte_always_inline int
copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
			struct rte_mbuf *m, struct buf_vector *buf_vec,
			uint16_t nr_vec, uint16_t num_buffers)
{
	uint32_t vec_idx = 0;
	uint32_t mbuf_offset, mbuf_avail;
	uint32_t buf_offset, buf_avail;
	uint64_t buf_addr, buf_iova, buf_len;
	uint32_t cpy_len;
	uint64_t hdr_addr;
	struct rte_mbuf *hdr_mbuf;
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
	struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
	int error = 0;

	if (unlikely(m == NULL)) {
		error = -1;
		goto out;
	}

	buf_addr = buf_vec[vec_idx].buf_addr;
	buf_iova = buf_vec[vec_idx].buf_iova;
	buf_len = buf_vec[vec_idx].buf_len;

	if (nr_vec > 1)
		rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);

	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
		error = -1;
		goto out;
	}

	hdr_mbuf = m;
	hdr_addr = buf_addr;
	if (unlikely(buf_len < dev->vhost_hlen))
		hdr = &tmp_hdr;
	else
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;

	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
		dev->vid, num_buffers);

	if (unlikely(buf_len < dev->vhost_hlen)) {
		buf_offset = dev->vhost_hlen - buf_len;
		vec_idx++;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;
		buf_avail = buf_len - buf_offset;
	} else {
		buf_offset = dev->vhost_hlen;
		buf_avail = buf_len - dev->vhost_hlen;
	}

	mbuf_avail  = rte_pktmbuf_data_len(m);
	mbuf_offset = 0;
	while (mbuf_avail != 0 || m->next != NULL) {
		/* done with current buf, get the next one */
		if (buf_avail == 0) {
			vec_idx++;
			if (unlikely(vec_idx >= nr_vec)) {
				error = -1;
				goto out;
			}

			buf_addr = buf_vec[vec_idx].buf_addr;
			buf_iova = buf_vec[vec_idx].buf_iova;
			buf_len = buf_vec[vec_idx].buf_len;

			/* Prefetch next buffer address. */
			if (vec_idx + 1 < nr_vec)
				rte_prefetch0((void *)(uintptr_t)
						buf_vec[vec_idx + 1].buf_addr);
			buf_offset = 0;
			buf_avail  = buf_len;
		}

		/* done with current mbuf, get the next one */
		if (mbuf_avail == 0) {
			m = m->next;

			mbuf_offset = 0;
			mbuf_avail  = rte_pktmbuf_data_len(m);
		}

		if (hdr_addr) {
			virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
			if (rxvq_is_mergeable(dev))
				ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
						num_buffers);

			if (unlikely(hdr == &tmp_hdr)) {
				copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
			} else {
				PRINT_PACKET(dev, (uintptr_t)hdr_addr,
						dev->vhost_hlen, 0);
				vhost_log_cache_write(dev, vq,
						buf_vec[0].buf_iova,
						dev->vhost_hlen);
			}

			hdr_addr = 0;
		}

		cpy_len = RTE_MIN(buf_avail, mbuf_avail);

		if (likely(cpy_len > MAX_BATCH_LEN ||
					vq->batch_copy_nb_elems >= vq->size)) {
			rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
				cpy_len);
			vhost_log_cache_write(dev, vq, buf_iova + buf_offset,
					cpy_len);
			PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
				cpy_len, 0);
		} else {
			batch_copy[vq->batch_copy_nb_elems].dst =
				(void *)((uintptr_t)(buf_addr + buf_offset));
			batch_copy[vq->batch_copy_nb_elems].src =
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
			batch_copy[vq->batch_copy_nb_elems].log_addr =
					buf_iova + buf_offset;
			batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
			vq->batch_copy_nb_elems++;
		}

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		buf_avail  -= cpy_len;
		buf_offset += cpy_len;
	}

out:

	return error;
}

static __rte_noinline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mbuf **pkts, uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint16_t num_buffers;
	struct buf_vector buf_vec[BUF_VECTOR_MAX];
	uint16_t avail_head;

	avail_head = *((volatile uint16_t *)&vq->avail->idx);

	/*
	 * The ordering between avail index and
	 * desc reads needs to be enforced.
	 */
	rte_smp_rmb();

	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
		uint16_t nr_vec = 0;

		if (unlikely(reserve_avail_buf_split(dev, vq,
						pkt_len, buf_vec, &num_buffers,
						avail_head, &nr_vec) < 0)) {
			VHOST_LOG_DEBUG(VHOST_DATA,
				"(%d) failed to get enough desc from vring\n",
				dev->vid);
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);

		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
			dev->vid, vq->last_avail_idx,
			vq->last_avail_idx + num_buffers);

		if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
						buf_vec, nr_vec,
						num_buffers) < 0) {
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		vq->last_avail_idx += num_buffers;
	}

	do_data_copy_enqueue(dev, vq);

	if (likely(vq->shadow_used_idx)) {
		flush_shadow_used_ring_split(dev, vq);
		vhost_vring_call_split(dev, vq);
	}

	return pkt_idx;
}

static __rte_noinline uint32_t
virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mbuf **pkts, uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint16_t num_buffers;
	struct buf_vector buf_vec[BUF_VECTOR_MAX];

	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
		uint16_t nr_vec = 0;
		uint16_t nr_descs = 0;

		if (unlikely(reserve_avail_buf_packed(dev, vq,
						pkt_len, buf_vec, &nr_vec,
						&num_buffers, &nr_descs) < 0)) {
			VHOST_LOG_DEBUG(VHOST_DATA,
				"(%d) failed to get enough desc from vring\n",
				dev->vid);
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);

		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
			dev->vid, vq->last_avail_idx,
			vq->last_avail_idx + num_buffers);

		if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
						buf_vec, nr_vec,
						num_buffers) < 0) {
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		vq->last_avail_idx += nr_descs;
		if (vq->last_avail_idx >= vq->size) {
			vq->last_avail_idx -= vq->size;
			vq->avail_wrap_counter ^= 1;
		}
	}

	do_data_copy_enqueue(dev, vq);

	if (likely(vq->shadow_used_idx)) {
		flush_shadow_used_ring_packed(dev, vq);
		vhost_vring_call_packed(dev, vq);
	}

	return pkt_idx;
}

static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
	struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_virtqueue *vq;
	uint32_t nb_tx = 0;

	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];

	rte_spinlock_lock(&vq->access_lock);

	if (unlikely(vq->enabled == 0))
		goto out_access_unlock;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_lock(vq);

	if (unlikely(vq->access_ok == 0))
		if (unlikely(vring_translate(dev, vq) < 0))
			goto out;

	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
	if (count == 0)
		goto out;

	if (vq_is_packed(dev))
		nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
	else
		nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);

out:
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
	rte_spinlock_unlock(&vq->access_lock);

	return nb_tx;
}

uint16_t
rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
	struct rte_mbuf **pkts, uint16_t count)
{
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return 0;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) %s: built-in vhost net backend is disabled.\n",
			dev->vid, __func__);
		return 0;
	}

	return virtio_dev_rx(dev, queue_id, pkts, count);
}

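/*
 * Minimal usage sketch for rte_vhost_enqueue_burst() (illustrative
 * only; "vid", "port_id" and the mbuf array come from the caller's own
 * setup, e.g. the new_device callback and an rte_eth_rx_burst() loop):
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb_rx, i;
 *
 *	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, MAX_PKT_BURST);
 *	// even queue indices are RX (guest receive) queues,
 *	// see is_valid_virt_queue_idx()
 *	rte_vhost_enqueue_burst(vid, 0, pkts, nb_rx);
 *	// data is copied into guest buffers, so the caller keeps
 *	// ownership of the mbufs and must free them itself
 *	for (i = 0; i < nb_rx; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */
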
static inline bool
virtio_net_with_host_offload(struct virtio_net *dev)
{
	if (dev->features &
			((1ULL << VIRTIO_NET_F_CSUM) |
			 (1ULL << VIRTIO_NET_F_HOST_ECN) |
			 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
			 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
			 (1ULL << VIRTIO_NET_F_HOST_UFO)))
		return true;

	return false;
}

static void
parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	void *l3_hdr = NULL;
	struct rte_ether_hdr *eth_hdr;
	uint16_t ethertype;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	m->l2_len = sizeof(struct rte_ether_hdr);
	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);

	if (ethertype == RTE_ETHER_TYPE_VLAN) {
		struct rte_vlan_hdr *vlan_hdr =
			(struct rte_vlan_hdr *)(eth_hdr + 1);

		m->l2_len += sizeof(struct rte_vlan_hdr);
		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
	}

	l3_hdr = (char *)eth_hdr + m->l2_len;

	switch (ethertype) {
	case RTE_ETHER_TYPE_IPV4:
		ipv4_hdr = l3_hdr;
		*l4_proto = ipv4_hdr->next_proto_id;
		m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
		*l4_hdr = (char *)l3_hdr + m->l3_len;
		m->ol_flags |= PKT_TX_IPV4;
		break;
	case RTE_ETHER_TYPE_IPV6:
		ipv6_hdr = l3_hdr;
		*l4_proto = ipv6_hdr->proto;
		m->l3_len = sizeof(struct rte_ipv6_hdr);
		*l4_hdr = (char *)l3_hdr + m->l3_len;
		m->ol_flags |= PKT_TX_IPV6;
		break;
	default:
		m->l3_len = 0;
		*l4_proto = 0;
		*l4_hdr = NULL;
		break;
	}
}

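/*
 * Translate the virtio-net header of a packet received from the guest
 * back into mbuf offload flags (checksum and GSO requests), using
 * parse_ethernet() above to locate the L3/L4 headers.
 */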
static __rte_always_inline void
vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
{
	uint16_t l4_proto = 0;
	void *l4_hdr = NULL;
	struct rte_tcp_hdr *tcp_hdr = NULL;

	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
		return;

	parse_ethernet(m, &l4_proto, &l4_hdr);
	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
			switch (hdr->csum_offset) {
			case (offsetof(struct rte_tcp_hdr, cksum)):
				if (l4_proto == IPPROTO_TCP)
					m->ol_flags |= PKT_TX_TCP_CKSUM;
				break;
			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
				if (l4_proto == IPPROTO_UDP)
					m->ol_flags |= PKT_TX_UDP_CKSUM;
				break;
			case (offsetof(struct rte_sctp_hdr, cksum)):
				if (l4_proto == IPPROTO_SCTP)
					m->ol_flags |= PKT_TX_SCTP_CKSUM;
				break;
			default:
				break;
			}
		}
	}

	if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			tcp_hdr = l4_hdr;
			m->ol_flags |= PKT_TX_TCP_SEG;
			m->tso_segsz = hdr->gso_size;
			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			m->ol_flags |= PKT_TX_UDP_SEG;
			m->tso_segsz = hdr->gso_size;
			m->l4_len = sizeof(struct rte_udp_hdr);
			break;
		default:
			RTE_LOG(WARNING, VHOST_DATA,
				"unsupported gso type %u.\n", hdr->gso_type);
			break;
		}
	}
}

static __rte_noinline void
copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
		struct buf_vector *buf_vec)
{
	uint64_t len;
	uint64_t remain = sizeof(struct virtio_net_hdr);
	uint64_t src;
	uint64_t dst = (uint64_t)(uintptr_t)hdr;

	while (remain) {
		len = RTE_MIN(remain, buf_vec->buf_len);
		src = buf_vec->buf_addr;
		rte_memcpy((void *)(uintptr_t)dst,
				(void *)(uintptr_t)src, len);

		remain -= len;
		dst += len;
		buf_vec++;
	}
}

static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
		  struct buf_vector *buf_vec, uint16_t nr_vec,
		  struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
{
	uint32_t buf_avail, buf_offset;
	uint64_t buf_addr, buf_iova, buf_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct rte_mbuf *cur = m, *prev = m;
	struct virtio_net_hdr tmp_hdr;
	struct virtio_net_hdr *hdr = NULL;
	/* A counter to avoid desc dead loop chain */
	uint16_t vec_idx = 0;
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
	int error = 0;

	buf_addr = buf_vec[vec_idx].buf_addr;
	buf_iova = buf_vec[vec_idx].buf_iova;
	buf_len = buf_vec[vec_idx].buf_len;

	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
		error = -1;
		goto out;
	}

	if (likely(nr_vec > 1))
		rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);

	if (virtio_net_with_host_offload(dev)) {
		if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
			/*
			 * No luck, the virtio-net header doesn't fit
			 * in a contiguous virtual area.
			 */
			copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
			hdr = &tmp_hdr;
		} else {
			hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
			rte_prefetch0(hdr);
		}
	}

	/*
	 * A virtio driver normally uses at least 2 desc buffers
	 * for Tx: the first for storing the header, and others
	 * for storing the data.
	 */
	if (unlikely(buf_len < dev->vhost_hlen)) {
		buf_offset = dev->vhost_hlen - buf_len;
		vec_idx++;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;
		buf_avail = buf_len - buf_offset;
	} else if (buf_len == dev->vhost_hlen) {
		if (unlikely(++vec_idx >= nr_vec))
			goto out;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;

		buf_offset = 0;
		buf_avail = buf_len;
	} else {
		buf_offset = dev->vhost_hlen;
		buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
	}

	rte_prefetch0((void *)(uintptr_t)
			(buf_addr + buf_offset));

	PRINT_PACKET(dev,
			(uintptr_t)(buf_addr + buf_offset),
			(uint32_t)buf_avail, 0);

	mbuf_offset = 0;
	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
	while (1) {
		uint64_t hpa;

		cpy_len = RTE_MIN(buf_avail, mbuf_avail);

		/*
		 * A desc buf might span two host physical pages that are
		 * not contiguous. In such a case (gpa_to_hpa returns 0),
		 * the data will be copied even though zero copy is enabled.
		 */
		if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
					buf_iova + buf_offset, cpy_len)))) {
			cur->data_len = cpy_len;
			cur->data_off = 0;
			cur->buf_addr =
				(void *)(uintptr_t)(buf_addr + buf_offset);
			cur->buf_iova = hpa;

			/*
			 * In zero copy mode, one mbuf can only reference
			 * data from one desc buffer, or part of one.
			 */
			mbuf_avail = cpy_len;
		} else {
			if (likely(cpy_len > MAX_BATCH_LEN ||
				   vq->batch_copy_nb_elems >= vq->size ||
				   (hdr && cur == m))) {
				rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
								   mbuf_offset),
					   (void *)((uintptr_t)(buf_addr +
							   buf_offset)),
					   cpy_len);
			} else {
				batch_copy[vq->batch_copy_nb_elems].dst =
					rte_pktmbuf_mtod_offset(cur, void *,
								mbuf_offset);
				batch_copy[vq->batch_copy_nb_elems].src =
					(void *)((uintptr_t)(buf_addr +
								buf_offset));
				batch_copy[vq->batch_copy_nb_elems].len =
					cpy_len;
				vq->batch_copy_nb_elems++;
			}
		}

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		buf_avail -= cpy_len;
		buf_offset += cpy_len;

		/* This buf has reached its end, get the next one */
		if (buf_avail == 0) {
			if (++vec_idx >= nr_vec)
				break;

			buf_addr = buf_vec[vec_idx].buf_addr;
			buf_iova = buf_vec[vec_idx].buf_iova;
			buf_len = buf_vec[vec_idx].buf_len;

			/*
			 * Prefetch the desc n + 1 buffer while the
			 * desc n buffer is being processed.
			 */
			if (vec_idx + 1 < nr_vec)
				rte_prefetch0((void *)(uintptr_t)
						buf_vec[vec_idx + 1].buf_addr);

			buf_offset = 0;
			buf_avail = buf_len;

			PRINT_PACKET(dev, (uintptr_t)buf_addr,
					(uint32_t)buf_avail, 0);
		}

		/*
		 * This mbuf has reached its end, get a new one
		 * to hold more data.
		 */
		if (mbuf_avail == 0) {
			cur = rte_pktmbuf_alloc(mbuf_pool);
			if (unlikely(cur == NULL)) {
				RTE_LOG(ERR, VHOST_DATA, "Failed to "
					"allocate memory for mbuf.\n");
				error = -1;
				goto out;
			}
			if (unlikely(dev->dequeue_zero_copy))
				rte_mbuf_refcnt_update(cur, 1);

			prev->next = cur;
			prev->data_len = mbuf_offset;
			m->nb_segs += 1;
			m->pkt_len += mbuf_offset;
			prev = cur;

			mbuf_offset = 0;
			mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
		}
	}

	prev->data_len = mbuf_offset;
	m->pkt_len    += mbuf_offset;

	if (hdr)
		vhost_dequeue_offload(hdr, m);

out:

	return error;
}

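/*
 * get_zmbuf() scans the fixed zmbuf array as a circular buffer: first
 * [last_zmbuf_idx, zmbuf_size), then [0, last_zmbuf_idx), so each slot
 * is visited at most once per call.
 */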
static __rte_always_inline struct zcopy_mbuf *
get_zmbuf(struct vhost_virtqueue *vq)
{
	uint16_t i;
	uint16_t last;
	int tries = 0;

	/* search [last_zmbuf_idx, zmbuf_size) */
	i = vq->last_zmbuf_idx;
	last = vq->zmbuf_size;

again:
	for (; i < last; i++) {
		if (vq->zmbufs[i].in_use == 0) {
			vq->last_zmbuf_idx = i + 1;
			vq->zmbufs[i].in_use = 1;
			return &vq->zmbufs[i];
		}
	}

	/* search [0, last_zmbuf_idx) */
	i = 0;
	last = vq->last_zmbuf_idx;
	if (tries++ == 0)
		goto again;

	return NULL;
}

static __rte_noinline uint16_t
virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	uint16_t i;
	uint16_t free_entries;

	if (unlikely(dev->dequeue_zero_copy)) {
		struct zcopy_mbuf *zmbuf, *next;

		for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
		     zmbuf != NULL; zmbuf = next) {
			next = TAILQ_NEXT(zmbuf, next);

			if (mbuf_is_consumed(zmbuf->mbuf)) {
				update_shadow_used_ring_split(vq,
						zmbuf->desc_idx, 0);
				TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
				restore_mbuf(zmbuf->mbuf);
				rte_pktmbuf_free(zmbuf->mbuf);
				put_zmbuf(zmbuf);
				vq->nr_zmbuf -= 1;
			}
		}

		if (likely(vq->shadow_used_idx)) {
			flush_shadow_used_ring_split(dev, vq);
			vhost_vring_call_split(dev, vq);
		}
	}

	free_entries = *((volatile uint16_t *)&vq->avail->idx) -
			vq->last_avail_idx;
	if (free_entries == 0)
		return 0;

	/*
	 * The ordering between avail index and
	 * desc reads needs to be enforced.
	 */
	rte_smp_rmb();

	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);

	count = RTE_MIN(count, MAX_PKT_BURST);
	count = RTE_MIN(count, free_entries);
	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
			dev->vid, count);

	for (i = 0; i < count; i++) {
		struct buf_vector buf_vec[BUF_VECTOR_MAX];
		uint16_t head_idx;
		uint32_t dummy_len;
		uint16_t nr_vec = 0;
		int err;

		if (unlikely(fill_vec_buf_split(dev, vq,
						vq->last_avail_idx + i,
						&nr_vec, buf_vec,
						&head_idx, &dummy_len,
						VHOST_ACCESS_RO) < 0))
			break;

		if (likely(dev->dequeue_zero_copy == 0))
			update_shadow_used_ring_split(vq, head_idx, 0);

		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);

		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to allocate memory for mbuf.\n");
			break;
		}

		err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
				mbuf_pool);
		if (unlikely(err)) {
			rte_pktmbuf_free(pkts[i]);
			break;
		}

		if (unlikely(dev->dequeue_zero_copy)) {
			struct zcopy_mbuf *zmbuf;

			zmbuf = get_zmbuf(vq);
			if (!zmbuf) {
				rte_pktmbuf_free(pkts[i]);
				break;
			}
			zmbuf->mbuf = pkts[i];
			zmbuf->desc_idx = head_idx;

			/*
			 * Pin lock the mbuf; we will check later to see
			 * whether the mbuf is freed (when we are the last
			 * user) or not. If that's the case, we then could
			 * update the used ring safely.
			 */
			rte_mbuf_refcnt_update(pkts[i], 1);

			vq->nr_zmbuf += 1;
			TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
		}
	}
	vq->last_avail_idx += i;

	if (likely(dev->dequeue_zero_copy == 0)) {
		do_data_copy_dequeue(vq);
		if (unlikely(i < count))
			vq->shadow_used_idx = i;
		if (likely(vq->shadow_used_idx)) {
			flush_shadow_used_ring_split(dev, vq);
			vhost_vring_call_split(dev, vq);
		}
	}

	return i;
}

static __rte_noinline uint16_t
virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	uint16_t i;

	if (unlikely(dev->dequeue_zero_copy)) {
		struct zcopy_mbuf *zmbuf, *next;

		for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
		     zmbuf != NULL; zmbuf = next) {
			next = TAILQ_NEXT(zmbuf, next);

			if (mbuf_is_consumed(zmbuf->mbuf)) {
				update_shadow_used_ring_packed(vq,
						zmbuf->desc_idx,
						0,
						zmbuf->desc_count);

				TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
				restore_mbuf(zmbuf->mbuf);
				rte_pktmbuf_free(zmbuf->mbuf);
				put_zmbuf(zmbuf);
				vq->nr_zmbuf -= 1;
			}
		}

		if (likely(vq->shadow_used_idx)) {
			flush_shadow_used_ring_packed(dev, vq);
			vhost_vring_call_packed(dev, vq);
		}
	}

	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);

	count = RTE_MIN(count, MAX_PKT_BURST);
	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
			dev->vid, count);

	for (i = 0; i < count; i++) {
		struct buf_vector buf_vec[BUF_VECTOR_MAX];
		uint16_t buf_id;
		uint32_t dummy_len;
		uint16_t desc_count, nr_vec = 0;
		int err;

		if (unlikely(fill_vec_buf_packed(dev, vq,
						vq->last_avail_idx, &desc_count,
						buf_vec, &nr_vec,
						&buf_id, &dummy_len,
						VHOST_ACCESS_RO) < 0))
			break;

		if (likely(dev->dequeue_zero_copy == 0))
			update_shadow_used_ring_packed(vq, buf_id, 0,
					desc_count);

		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);

		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to allocate memory for mbuf.\n");
			break;
		}

		err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
				mbuf_pool);
		if (unlikely(err)) {
			rte_pktmbuf_free(pkts[i]);
			break;
		}

		if (unlikely(dev->dequeue_zero_copy)) {
			struct zcopy_mbuf *zmbuf;

			zmbuf = get_zmbuf(vq);
			if (!zmbuf) {
				rte_pktmbuf_free(pkts[i]);
				break;
			}
			zmbuf->mbuf = pkts[i];
			zmbuf->desc_idx = buf_id;
			zmbuf->desc_count = desc_count;

			/*
			 * Pin lock the mbuf; we will check later to see
			 * whether the mbuf is freed (when we are the last
			 * user) or not. If that's the case, we then could
			 * update the used ring safely.
			 */
			rte_mbuf_refcnt_update(pkts[i], 1);

			vq->nr_zmbuf += 1;
			TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
		}

		vq->last_avail_idx += desc_count;
		if (vq->last_avail_idx >= vq->size) {
			vq->last_avail_idx -= vq->size;
			vq->avail_wrap_counter ^= 1;
		}
	}

	if (likely(dev->dequeue_zero_copy == 0)) {
		do_data_copy_dequeue(vq);
		if (unlikely(i < count))
			vq->shadow_used_idx = i;
		if (likely(vq->shadow_used_idx)) {
			flush_shadow_used_ring_packed(dev, vq);
			vhost_vring_call_packed(dev, vq);
		}
	}

	return i;
}

uint16_t
rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	struct virtio_net *dev;
	struct rte_mbuf *rarp_mbuf = NULL;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return 0;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) %s: built-in vhost net backend is disabled.\n",
			dev->vid, __func__);
		return 0;
	}

	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];

	if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
		return 0;

	if (unlikely(vq->enabled == 0)) {
		count = 0;
		goto out_access_unlock;
	}

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_lock(vq);

	if (unlikely(vq->access_ok == 0))
		if (unlikely(vring_translate(dev, vq) < 0)) {
			count = 0;
			goto out;
		}

	/*
	 * Construct a RARP broadcast packet and inject it into the "pkts"
	 * array, so it looks like the guest actually sent such a packet.
	 *
	 * Check user_send_rarp() for more information.
	 *
	 * broadcast_rarp shares a cacheline in the virtio_net structure
	 * with some fields that are accessed during enqueue, and
	 * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
	 * result in false sharing between enqueue and dequeue.
	 *
	 * Prevent unnecessary false sharing by reading broadcast_rarp first
	 * and only performing cmpset if the read indicates it is likely to
	 * be set.
	 */
	if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
			rte_atomic16_cmpset((volatile uint16_t *)
				&dev->broadcast_rarp.cnt, 1, 0))) {

		rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
		if (rarp_mbuf == NULL) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to make RARP packet.\n");
			count = 0;
			goto out;
		}
		count -= 1;
	}

	if (vq_is_packed(dev))
		count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
	else
		count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);

out:
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
	rte_spinlock_unlock(&vq->access_lock);

	if (unlikely(rarp_mbuf != NULL)) {
		/*
		 * Inject it to the head of "pkts" array, so that switch's mac
		 * learning table will get updated first.
		 */
		memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
		pkts[0] = rarp_mbuf;
		count += 1;
	}

	return count;
}
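
/*
 * Minimal usage sketch for rte_vhost_dequeue_burst() (illustrative
 * only; "vid" and "mbuf_pool" come from the caller's own setup):
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb_deq, i;
 *
 *	// odd queue indices are TX (guest transmit) queues,
 *	// see is_valid_virt_queue_idx()
 *	nb_deq = rte_vhost_dequeue_burst(vid, 1, mbuf_pool,
 *			pkts, MAX_PKT_BURST);
 *	for (i = 0; i < nb_deq; i++) {
 *		// forward or process, then release; ownership of the
 *		// dequeued mbufs is with the caller
 *		rte_pktmbuf_free(pkts[i]);
 *	}
 */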