/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _VIRTQUEUE_H_
#define _VIRTQUEUE_H_

#include <stdint.h>

#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_net.h>

#include "virtio.h"
#include "virtio_ring.h"
#include "virtio_logs.h"
#include "virtio_rxtx.h"

struct rte_mbuf;

#define DEFAULT_TX_FREE_THRESH 32
#define DEFAULT_RX_FREE_THRESH 32

#define VIRTIO_MBUF_BURST_SZ 64

/*
 * Per virtio_ring.h in Linux.
 *     For virtio_pci on SMP, we don't need to order with respect to MMIO
 *     accesses through relaxed memory I/O windows, so thread_fence is
 *     sufficient.
 *
 *     For using virtio to talk to real devices (e.g. vDPA) we do need real
 *     barriers.
 */
static inline void
virtio_mb(uint8_t weak_barriers)
{
        if (weak_barriers)
                rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
        else
                rte_mb();
}

static inline void
virtio_rmb(uint8_t weak_barriers)
{
        if (weak_barriers)
                rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
        else
                rte_io_rmb();
}

static inline void
virtio_wmb(uint8_t weak_barriers)
{
        if (weak_barriers)
                rte_atomic_thread_fence(__ATOMIC_RELEASE);
        else
                rte_io_wmb();
}

static inline uint16_t
virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
                             uint8_t weak_barriers)
{
        uint16_t flags;

        if (weak_barriers) {
/* x86 prefers rte_io_rmb over __atomic_load_n as it reports slightly
 * better performance (~1.5%), which comes from the branch saved by the
 * compiler. The if and else branches are identical on all platforms
 * except Arm.
 */
#ifdef RTE_ARCH_ARM
                flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
#else
                flags = dp->flags;
                rte_io_rmb();
#endif
        } else {
                flags = dp->flags;
                rte_io_rmb();
        }

        return flags;
}

static inline void
virtqueue_store_flags_packed(struct vring_packed_desc *dp,
                             uint16_t flags, uint8_t weak_barriers)
{
        if (weak_barriers) {
/* x86 prefers rte_io_wmb over __atomic_store_n as it reports slightly
 * better performance (~1.5%), which comes from the branch saved by the
 * compiler. The if and else branches are identical on all platforms
 * except Arm.
 */
#ifdef RTE_ARCH_ARM
                __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
#else
                rte_io_wmb();
                dp->flags = flags;
#endif
        } else {
                rte_io_wmb();
                dp->flags = flags;
        }
}

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)  rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)  do {} while (0)
#endif

#define VIRTQUEUE_MAX_NAME_SZ 32

#define VTNET_SQ_RQ_QUEUE_IDX 0
#define VTNET_SQ_TQ_QUEUE_IDX 1
#define VTNET_SQ_CQ_QUEUE_IDX 2

enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

/**
 * Control the RX mode, i.e. promiscuous, allmulti, etc.
 * All commands require an "out" sg entry containing a 1-byte
 * state value, zero = disable, non-zero = enable. Commands
 * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
 * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
 * (A fill sketch follows struct virtio_pmd_ctrl below.)
 */
#define VIRTIO_NET_CTRL_RX              0
#define VIRTIO_NET_CTRL_RX_PROMISC      0
#define VIRTIO_NET_CTRL_RX_ALLMULTI     1
#define VIRTIO_NET_CTRL_RX_ALLUNI       2
#define VIRTIO_NET_CTRL_RX_NOMULTI      3
#define VIRTIO_NET_CTRL_RX_NOUNI        4
#define VIRTIO_NET_CTRL_RX_NOBCAST      5

/**
 * Control the MAC
 *
 * The MAC filter table is managed by the hypervisor; the guest should
 * assume the size is infinite. Filtering should be considered
 * imperfect, i.e. based on hypervisor resources, the guest may
 * receive packets from sources not specified in the filter list.
 *
 * In addition to the class/cmd header, the TABLE_SET command requires
 * two out scatterlists. Each contains a 4-byte count of entries followed
 * by a concatenated byte stream of the ETH_ALEN MAC addresses. The
 * first sg list contains unicast addresses, the second is for multicast.
 * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
 * is available. (A layout sketch follows the command defines below.)
 *
 * The ADDR_SET command requires one out scatterlist containing a
 * 6-byte MAC address. This functionality is present if the
 * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
 */
struct virtio_net_ctrl_mac {
        uint32_t entries;
        uint8_t macs[][RTE_ETHER_ADDR_LEN];
} __rte_packed;

#define VIRTIO_NET_CTRL_MAC             1
#define VIRTIO_NET_CTRL_MAC_TABLE_SET   0
#define VIRTIO_NET_CTRL_MAC_ADDR_SET    1
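
/*
 * Illustrative sketch, not part of the original header: building the
 * payload of one TABLE_SET "out" scatterlist entry as described above,
 * a 4-byte entry count followed by packed 6-byte MAC addresses. One such
 * buffer is built for unicast addresses and a second one for multicast;
 * the control-queue send helper (in virtio_ethdev.c) is assumed and not
 * shown. struct rte_ether_addr comes from rte_ether.h, which this header
 * already relies on for RTE_ETHER_ADDR_LEN.
 */
static inline size_t
virtio_mac_table_fill_example(struct virtio_net_ctrl_mac *tbl,
                const struct rte_ether_addr *addrs, uint32_t nb_addrs)
{
        uint32_t i, j;

        tbl->entries = nb_addrs;        /* 4-byte count ... */
        for (i = 0; i < nb_addrs; i++)  /* ... then packed 6-byte MACs */
                for (j = 0; j < RTE_ETHER_ADDR_LEN; j++)
                        tbl->macs[i][j] = addrs[i].addr_bytes[j];

        /* bytes occupied by this scatterlist entry */
        return sizeof(tbl->entries) + nb_addrs * RTE_ETHER_ADDR_LEN;
}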

/**
 * Control VLAN filtering
 *
 * The VLAN filter table is controlled via a simple ADD/DEL interface.
 * VLAN IDs not added may be filtered by the hypervisor. Del is the
 * opposite of add. Both commands expect an out entry containing a
 * 2-byte VLAN ID. VLAN filtering is available with the
 * VIRTIO_NET_F_CTRL_VLAN feature bit.
 * (A fill sketch follows struct virtio_pmd_ctrl below.)
 */
#define VIRTIO_NET_CTRL_VLAN            2
#define VIRTIO_NET_CTRL_VLAN_ADD        0
#define VIRTIO_NET_CTRL_VLAN_DEL        1

/**
 * Control link announce acknowledgement
 *
 * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that the
 * driver has received the notification; the device will clear the
 * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
 * this command.
 */
#define VIRTIO_NET_CTRL_ANNOUNCE        3
#define VIRTIO_NET_CTRL_ANNOUNCE_ACK    0

struct virtio_net_ctrl_hdr {
        uint8_t class;
        uint8_t cmd;
} __rte_packed;

typedef uint8_t virtio_net_ctrl_ack;

#define VIRTIO_NET_OK   0
#define VIRTIO_NET_ERR  1

#define VIRTIO_MAX_CTRL_DATA 2048

struct virtio_pmd_ctrl {
        struct virtio_net_ctrl_hdr hdr;
        virtio_net_ctrl_ack status;
        uint8_t data[VIRTIO_MAX_CTRL_DATA];
};
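
/*
 * Illustrative sketches, not part of the original header: how the RX-mode
 * and VLAN commands described above are laid out in a struct
 * virtio_pmd_ctrl. The control-queue send helper lives in virtio_ethdev.c
 * and is assumed here; data is stored byte-wise because it is not 16-bit
 * aligned within the structure.
 */
static inline void
virtio_ctrl_promisc_fill_example(struct virtio_pmd_ctrl *ctrl, uint8_t enable)
{
        ctrl->hdr.class = VIRTIO_NET_CTRL_RX;
        ctrl->hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
        ctrl->data[0] = !!enable;       /* 1-byte state, zero = disable */
}

static inline void
virtio_ctrl_vlan_add_fill_example(struct virtio_pmd_ctrl *ctrl,
                uint16_t vlan_id)
{
        ctrl->hdr.class = VIRTIO_NET_CTRL_VLAN;
        ctrl->hdr.cmd = VIRTIO_NET_CTRL_VLAN_ADD;
        /* one "out" entry holding a 2-byte VLAN ID; native byte order is
         * assumed here, matching the PMD's plain copy on little-endian
         * hosts.
         */
        ctrl->data[0] = (uint8_t)(vlan_id & 0xff);
        ctrl->data[1] = (uint8_t)(vlan_id >> 8);
}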

struct vq_desc_extra {
        void *cookie;
        uint16_t ndescs;
        uint16_t next;
};

#define virtnet_rxq_to_vq(rxvq) container_of(rxvq, struct virtqueue, rxq)
#define virtnet_txq_to_vq(txvq) container_of(txvq, struct virtqueue, txq)
#define virtnet_cq_to_vq(cvq) container_of(cvq, struct virtqueue, cq)

struct virtqueue {
        struct virtio_hw *hw; /**< virtio_hw structure pointer. */
        union {
                struct {
                        /**< vring keeping desc, used and avail */
                        struct vring ring;
                } vq_split;

                struct {
                        /**< vring keeping descs and events */
                        struct vring_packed ring;
                        bool used_wrap_counter;
                        uint16_t cached_flags; /**< cached flags for descs */
                        uint16_t event_flags_shadow;
                } vq_packed;
        };

        uint16_t vq_used_cons_idx; /**< last consumed descriptor */
        uint16_t vq_nentries;      /**< vring desc numbers */
        uint16_t vq_free_cnt;      /**< num of desc available */
        uint16_t vq_avail_idx;     /**< sync until needed */
        uint16_t vq_free_thresh;   /**< free threshold */

        /**
         * Head of the free chain in the descriptor table. If
         * there are no free descriptors, this will be set to
         * VQ_RING_DESC_CHAIN_END.
         */
        uint16_t vq_desc_head_idx;
        uint16_t vq_desc_tail_idx;
        uint16_t vq_queue_index; /**< PCI queue index */

        void *vq_ring_virt_mem;  /**< linear address of vring */
        unsigned int vq_ring_size;

        union {
                struct virtnet_rx rxq;
                struct virtnet_tx txq;
                struct virtnet_ctl cq;
        };

        rte_iova_t vq_ring_mem; /**< physical address of vring,
                                 * or virtual address for virtio_user. */

        uint16_t *notify_addr;
        struct rte_mbuf **sw_ring; /**< RX software ring. */
        struct vq_desc_extra vq_descx[0];
};

/* If multiqueue is provided by the host, then we support it. */
#define VIRTIO_NET_CTRL_MQ                      4
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET         0
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN         1
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX         0x8000
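
/*
 * Sketch, not in the original file (mirrors the MQ handling in
 * virtio_ethdev.c): requesting a number of queue pairs with the command
 * defined above. The 16-bit pair count is the entire payload and must
 * stay within [VQ_PAIRS_MIN, VQ_PAIRS_MAX]; stored byte-wise, assuming
 * a little-endian host as the PMD does.
 */
static inline void
virtio_ctrl_mq_fill_example(struct virtio_pmd_ctrl *ctrl, uint16_t nb_pairs)
{
        ctrl->hdr.class = VIRTIO_NET_CTRL_MQ;
        ctrl->hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
        ctrl->data[0] = (uint8_t)(nb_pairs & 0xff);
        ctrl->data[1] = (uint8_t)(nb_pairs >> 8);
}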

/**
 * This is the first element of the scatter-gather list. If you don't
 * specify GSO or CSUM features, you can simply ignore the header.
 */
struct virtio_net_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1    /**< Use csum_start,csum_offset */
#define VIRTIO_NET_HDR_F_DATA_VALID 2    /**< Checksum is valid */
        uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE     0    /**< Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4    1    /**< GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP      3    /**< GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6    4    /**< GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_ECN      0x80 /**< TCP has ECN set */
        uint8_t gso_type;
        uint16_t hdr_len;     /**< Ethernet + IP + tcp/udp hdrs */
        uint16_t gso_size;    /**< Bytes to append to hdr_len per frame */
        uint16_t csum_start;  /**< Position to start checksumming from */
        uint16_t csum_offset; /**< Offset after that to place checksum */
};

/**
 * This is the version of the header to use when the MRG_RXBUF
 * feature has been negotiated.
 */
struct virtio_net_hdr_mrg_rxbuf {
        struct virtio_net_hdr hdr;
        uint16_t num_buffers; /**< Number of merged rx buffers */
};

/* Region reserved to allow for transmit header and indirect ring */
#define VIRTIO_MAX_TX_INDIRECT 8
struct virtio_tx_region {
        struct virtio_net_hdr_mrg_rxbuf tx_hdr;
        union {
                struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT];
                struct vring_packed_desc
                        tx_packed_indir[VIRTIO_MAX_TX_INDIRECT];
        } __rte_aligned(16);
};
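
/*
 * A packed descriptor has been used by the device when its AVAIL and
 * USED flag bits are equal and both match the driver's current
 * used_wrap_counter (virtio 1.1 packed-ring rule). The flags are read
 * with acquire ordering (virtqueue_fetch_flags_packed) so the descriptor
 * payload is never read before the flags.
 */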
static inline int
desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
{
        uint16_t used, avail, flags;

        flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
        used = !!(flags & VRING_PACKED_DESC_F_USED);
        avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);

        return avail == used && used == vq->vq_packed.used_wrap_counter;
}

static inline void
vring_desc_init_packed(struct virtqueue *vq, int n)
{
        int i;

        for (i = 0; i < n - 1; i++) {
                vq->vq_packed.ring.desc[i].id = i;
                vq->vq_descx[i].next = i + 1;
        }
        vq->vq_packed.ring.desc[i].id = i;
        vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
}

/* Chain all the descriptors in the ring with an END */
static inline void
vring_desc_init_split(struct vring_desc *dp, uint16_t n)
{
        uint16_t i;

        for (i = 0; i < n - 1; i++)
                dp[i].next = (uint16_t)(i + 1);
        dp[i].next = VQ_RING_DESC_CHAIN_END;
}

static inline void
vring_desc_init_indirect_packed(struct vring_packed_desc *dp, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                dp[i].id = (uint16_t)i;
                dp[i].flags = VRING_DESC_F_WRITE;
        }
}

/**
 * Tell the backend not to interrupt us. Implementation for packed virtqueues.
 */
static inline void
virtqueue_disable_intr_packed(struct virtqueue *vq)
{
        if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
                vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
                vq->vq_packed.ring.driver->desc_event_flags =
                        vq->vq_packed.event_flags_shadow;
        }
}

/**
 * Tell the backend not to interrupt us. Implementation for split virtqueues.
 */
static inline void
virtqueue_disable_intr_split(struct virtqueue *vq)
{
        vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

/**
 * Tell the backend not to interrupt us.
 */
static inline void
virtqueue_disable_intr(struct virtqueue *vq)
{
        if (virtio_with_packed_queue(vq->hw))
                virtqueue_disable_intr_packed(vq);
        else
                virtqueue_disable_intr_split(vq);
}

/**
 * Tell the backend to interrupt us. Implementation for packed virtqueues.
 */
static inline void
virtqueue_enable_intr_packed(struct virtqueue *vq)
{
        if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
                vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
                vq->vq_packed.ring.driver->desc_event_flags =
                        vq->vq_packed.event_flags_shadow;
        }
}

/**
 * Tell the backend to interrupt us. Implementation for split virtqueues.
 */
static inline void
virtqueue_enable_intr_split(struct virtqueue *vq)
{
        vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
}

/**
 * Tell the backend to interrupt us.
 */
static inline void
virtqueue_enable_intr(struct virtqueue *vq)
{
        if (virtio_with_packed_queue(vq->hw))
                virtqueue_enable_intr_packed(vq);
        else
                virtqueue_enable_intr_split(vq);
}

/**
 * Dump virtqueue internal structures, for debug purposes only.
 */
void virtqueue_dump(struct virtqueue *vq);

/**
 * Get all mbufs to be freed.
 */
struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);

/* Flush the elements in the used ring. */
void virtqueue_rxvq_flush(struct virtqueue *vq);

int virtqueue_rxvq_reset_packed(struct virtqueue *vq);

int virtqueue_txvq_reset_packed(struct virtqueue *vq);

static inline int
virtqueue_full(const struct virtqueue *vq)
{
        return vq->vq_free_cnt == 0;
}

static inline int
virtio_get_queue_type(struct virtio_hw *hw, uint16_t vq_idx)
{
        if (vq_idx == hw->max_queue_pairs * 2)
                return VTNET_CQ;
        else if (vq_idx % 2 == 0)
                return VTNET_RQ;
        else
                return VTNET_TQ;
}

/* virtqueue_nused has a load-acquire or rte_io_rmb inside */
static inline uint16_t
virtqueue_nused(const struct virtqueue *vq)
{
        uint16_t idx;

        if (vq->hw->weak_barriers) {
        /**
         * x86 prefers rte_smp_rmb over __atomic_load_n as it
         * reports slightly better performance, which comes from the
         * branch saved by the compiler.
         * The if and else branches are identical with the smp and io
         * barriers both defined as compiler barriers on x86.
         */
#ifdef RTE_ARCH_X86_64
                idx = vq->vq_split.ring.used->idx;
                rte_smp_rmb();
#else
                idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,
                                __ATOMIC_ACQUIRE);
#endif
        } else {
                idx = vq->vq_split.ring.used->idx;
                rte_io_rmb();
        }
        return idx - vq->vq_used_cons_idx;
}
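
/*
 * Usage sketch, not in the original file (mirrors the split-ring receive
 * path in virtio_rxtx.c): bound a burst by the count of descriptors the
 * device has already marked as used.
 */
static inline uint16_t
virtqueue_rx_budget_example(const struct virtqueue *vq, uint16_t burst)
{
        uint16_t nb_used = virtqueue_nused(vq); /* load-acquire inside */

        return RTE_MIN(nb_used, burst);
}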

void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
                          uint16_t num);

static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
        if (vq->hw->weak_barriers) {
        /* x86 prefers rte_smp_wmb over __atomic_store_n as it reports
         * slightly better performance, which comes from the branch
         * saved by the compiler.
         * The if and else branches are identical with the smp and
         * io barriers both defined as compiler barriers on x86.
         */
#ifdef RTE_ARCH_X86_64
                rte_smp_wmb();
                vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
#else
                __atomic_store_n(&vq->vq_split.ring.avail->idx,
                                 vq->vq_avail_idx, __ATOMIC_RELEASE);
#endif
        } else {
                rte_io_wmb();
                vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
        }
}

static inline void
vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
{
        uint16_t avail_idx;
        /*
         * Place the head of the descriptor chain into the next slot and make
         * it usable to the host. The chain is made available now rather than
         * deferring to virtqueue_notify() in the hopes that if the host is
         * currently running on another CPU, we can keep it processing the new
         * descriptor.
         */
        avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
        if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))
                vq->vq_split.ring.avail->ring[avail_idx] = desc_idx;
        vq->vq_avail_idx++;
}

static inline int
virtqueue_kick_prepare(struct virtqueue *vq)
{
        /*
         * Ensure the updated avail->idx is visible to vhost before reading
         * the used->flags.
         */
        virtio_mb(vq->hw->weak_barriers);
        return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
}

static inline int
virtqueue_kick_prepare_packed(struct virtqueue *vq)
{
        uint16_t flags;

        /*
         * Ensure updated data is visible to vhost before reading the flags.
         */
        virtio_mb(vq->hw->weak_barriers);
        flags = vq->vq_packed.ring.device->desc_event_flags;

        return flags != RING_EVENT_FLAGS_DISABLE;
}

/*
 * virtqueue_kick_prepare*() or the virtio_wmb() should be called
 * before this function to be sure that all the data is visible to vhost.
 */
static inline void
virtqueue_notify(struct virtqueue *vq)
{
        VIRTIO_OPS(vq->hw)->notify_queue(vq->hw, vq);
}
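
/*
 * Usage sketch, not in the original file: the order in which the
 * split-ring transmit path publishes a descriptor chain and rings the
 * doorbell (see virtio_rxtx.c for the real code). head_idx is the head
 * of an already-written descriptor chain.
 */
static inline void
virtqueue_publish_and_kick_example(struct virtqueue *vq, uint16_t head_idx)
{
        vq_update_avail_ring(vq, head_idx); /* expose the chain head */
        vq_update_avail_idx(vq);            /* release-store avail->idx */
        if (virtqueue_kick_prepare(vq))     /* full barrier, check flags */
                virtqueue_notify(vq);       /* doorbell */
}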

#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
        uint16_t used_idx, nused; \
        used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
                                   __ATOMIC_RELAXED); \
        nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
        if (virtio_with_packed_queue((vq)->hw)) { \
                PMD_INIT_LOG(DEBUG, \
                "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
                " cached_flags=0x%x; used_wrap_counter=%d", \
                (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
                (vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
                (vq)->vq_packed.used_wrap_counter); \
                break; \
        } \
        PMD_INIT_LOG(DEBUG, \
          "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
          " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
          " avail.flags=0x%x; used.flags=0x%x", \
          (vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
          (vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
          __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
          (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
} while (0)
#else
#define VIRTQUEUE_DUMP(vq) do { } while (0)
#endif

/* avoid a write when the value is unchanged, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do {      \
        typeof(var) *const var_ = &(var);       \
        typeof(val) const val_ = (val);         \
        if (*var_ != val_)                      \
                *var_ = val_;                   \
} while (0)

#define virtqueue_clear_net_hdr(hdr) do {               \
        typeof(hdr) hdr_ = (hdr);                       \
        ASSIGN_UNLESS_EQUAL((hdr_)->csum_start, 0);     \
        ASSIGN_UNLESS_EQUAL((hdr_)->csum_offset, 0);    \
        ASSIGN_UNLESS_EQUAL((hdr_)->flags, 0);          \
        ASSIGN_UNLESS_EQUAL((hdr_)->gso_type, 0);       \
        ASSIGN_UNLESS_EQUAL((hdr_)->gso_size, 0);       \
        ASSIGN_UNLESS_EQUAL((hdr_)->hdr_len, 0);        \
} while (0)

static inline void
virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
{
        uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;

        if (cookie->ol_flags & PKT_TX_TCP_SEG)
                csum_l4 |= PKT_TX_TCP_CKSUM;

        switch (csum_l4) {
        case PKT_TX_UDP_CKSUM:
                hdr->csum_start = cookie->l2_len + cookie->l3_len;
                hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
                hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                break;

        case PKT_TX_TCP_CKSUM:
                hdr->csum_start = cookie->l2_len + cookie->l3_len;
                hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
                hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                break;

        default:
                ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
                ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
                ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
                break;
        }

        /* TCP Segmentation Offload */
        if (cookie->ol_flags & PKT_TX_TCP_SEG) {
                hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
                        VIRTIO_NET_HDR_GSO_TCPV6 :
                        VIRTIO_NET_HDR_GSO_TCPV4;
                hdr->gso_size = cookie->tso_segsz;
                hdr->hdr_len = cookie->l2_len + cookie->l3_len + cookie->l4_len;
        } else {
                ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
                ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
                ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
        }
}

static inline void
virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                              uint16_t needed, int use_indirect, int can_push,
                              int in_order)
{
        struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
        struct vq_desc_extra *dxp;
        struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct vring_packed_desc *start_dp, *head_dp;
        uint16_t idx, id, head_idx, head_flags;
        int16_t head_size = vq->hw->vtnet_hdr_size;
        struct virtio_net_hdr *hdr;
        uint16_t prev;
        bool prepend_header = false;
        uint16_t seg_num = cookie->nb_segs;

        id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;

        dxp = &vq->vq_descx[id];
        dxp->ndescs = needed;
        dxp->cookie = cookie;

        head_idx = vq->vq_avail_idx;
        idx = head_idx;
        prev = head_idx;
        start_dp = vq->vq_packed.ring.desc;

        head_dp = &vq->vq_packed.ring.desc[idx];
        head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
        head_flags |= vq->vq_packed.cached_flags;

        if (can_push) {
                /* prepend cannot fail, checked by caller */
                hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
                                              -head_size);
                prepend_header = true;

                /* if offload disabled, it is not zeroed below, do it now */
                if (!vq->hw->has_tx_offload)
                        virtqueue_clear_net_hdr(hdr);
        } else if (use_indirect) {
                /* setup tx ring slot to point to indirect
                 * descriptor list stored in reserved region.
                 *
                 * the first slot in indirect ring is already preset
                 * to point to the header in reserved region
                 */
                start_dp[idx].addr = txvq->virtio_net_hdr_mem +
                        RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
                start_dp[idx].len = (seg_num + 1) *
                        sizeof(struct vring_packed_desc);
                /* reset flags for indirect desc */
                head_flags = VRING_DESC_F_INDIRECT;
                head_flags |= vq->vq_packed.cached_flags;
                hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

                /* loop below will fill in rest of the indirect elements */
                start_dp = txr[idx].tx_packed_indir;
                idx = 1;
        } else {
                /* setup first tx ring slot to point to header
                 * stored in reserved region.
                 */
                start_dp[idx].addr = txvq->virtio_net_hdr_mem +
                        RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
                start_dp[idx].len = vq->hw->vtnet_hdr_size;
                hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
                idx++;
                if (idx >= vq->vq_nentries) {
                        idx -= vq->vq_nentries;
                        vq->vq_packed.cached_flags ^=
                                VRING_PACKED_DESC_F_AVAIL_USED;
                }
        }

        if (vq->hw->has_tx_offload)
                virtqueue_xmit_offload(hdr, cookie);

        do {
                uint16_t flags;

                start_dp[idx].addr = rte_mbuf_data_iova(cookie);
                start_dp[idx].len = cookie->data_len;
                if (prepend_header) {
                        start_dp[idx].addr -= head_size;
                        start_dp[idx].len += head_size;
                        prepend_header = false;
                }

                if (likely(idx != head_idx)) {
                        flags = cookie->next ? VRING_DESC_F_NEXT : 0;
                        flags |= vq->vq_packed.cached_flags;
                        start_dp[idx].flags = flags;
                }
                prev = idx;
                idx++;
                if (idx >= vq->vq_nentries) {
                        idx -= vq->vq_nentries;
                        vq->vq_packed.cached_flags ^=
                                VRING_PACKED_DESC_F_AVAIL_USED;
                }
        } while ((cookie = cookie->next) != NULL);

        start_dp[prev].id = id;

        if (use_indirect) {
                idx = head_idx;
                if (++idx >= vq->vq_nentries) {
                        idx -= vq->vq_nentries;
                        vq->vq_packed.cached_flags ^=
                                VRING_PACKED_DESC_F_AVAIL_USED;
                }
        }

        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
        vq->vq_avail_idx = idx;

        if (!in_order) {
                vq->vq_desc_head_idx = dxp->next;
                if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
                        vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
        }

        virtqueue_store_flags_packed(head_dp, head_flags,
                                     vq->hw->weak_barriers);
}

static void
vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
{
        struct vq_desc_extra *dxp;

        dxp = &vq->vq_descx[id];
        vq->vq_free_cnt += dxp->ndescs;

        if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
                vq->vq_desc_head_idx = id;
        else
                vq->vq_descx[vq->vq_desc_tail_idx].next = id;

        vq->vq_desc_tail_idx = id;
        dxp->next = VQ_RING_DESC_CHAIN_END;
}

static void
virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
{
        uint16_t used_idx, id, curr_id, free_cnt = 0;
        uint16_t size = vq->vq_nentries;
        struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
        struct vq_desc_extra *dxp;

        used_idx = vq->vq_used_cons_idx;
        /* desc_is_used has a load-acquire or rte_io_rmb inside
         * and waits for a used desc in the virtqueue.
         */
        while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
                id = desc[used_idx].id;
                do {
                        curr_id = used_idx;
                        dxp = &vq->vq_descx[used_idx];
                        used_idx += dxp->ndescs;
                        free_cnt += dxp->ndescs;
                        num -= dxp->ndescs;
                        if (used_idx >= size) {
                                used_idx -= size;
                                vq->vq_packed.used_wrap_counter ^= 1;
                        }
                        if (dxp->cookie != NULL) {
                                rte_pktmbuf_free(dxp->cookie);
                                dxp->cookie = NULL;
                        }
                } while (curr_id != id);
        }
        vq->vq_used_cons_idx = used_idx;
        vq->vq_free_cnt += free_cnt;
}

static void
virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
{
        uint16_t used_idx, id;
        uint16_t size = vq->vq_nentries;
        struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
        struct vq_desc_extra *dxp;

        used_idx = vq->vq_used_cons_idx;
        /* desc_is_used has a load-acquire or rte_io_rmb inside
         * and waits for a used desc in the virtqueue.
         */
        while (num-- && desc_is_used(&desc[used_idx], vq)) {
                id = desc[used_idx].id;
                dxp = &vq->vq_descx[id];
                vq->vq_used_cons_idx += dxp->ndescs;
                if (vq->vq_used_cons_idx >= size) {
                        vq->vq_used_cons_idx -= size;
                        vq->vq_packed.used_wrap_counter ^= 1;
                }
                vq_ring_free_id_packed(vq, id);
                if (dxp->cookie != NULL) {
                        rte_pktmbuf_free(dxp->cookie);
                        dxp->cookie = NULL;
                }
                used_idx = vq->vq_used_cons_idx;
        }
}

/* Cleanup from completed transmits. */
static inline void
virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
{
        if (in_order)
                virtio_xmit_cleanup_inorder_packed(vq, num);
        else
                virtio_xmit_cleanup_normal_packed(vq, num);
}

static __rte_always_inline void
virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
{
        uint16_t i, used_idx, desc_idx;

        for (i = 0; i < num; i++) {
                struct vring_used_elem *uep;
                struct vq_desc_extra *dxp;

                used_idx = (uint16_t)(vq->vq_used_cons_idx &
                                (vq->vq_nentries - 1));
                uep = &vq->vq_split.ring.used->ring[used_idx];

                desc_idx = (uint16_t)uep->id;
                dxp = &vq->vq_descx[desc_idx];
                vq->vq_used_cons_idx++;
                vq_ring_free_chain(vq, desc_idx);

                if (dxp->cookie != NULL) {
                        rte_pktmbuf_free(dxp->cookie);
                        dxp->cookie = NULL;
                }
        }
}

/* Cleanup from completed inorder transmits. */
static __rte_always_inline void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
        uint16_t i, idx = vq->vq_used_cons_idx;
        int16_t free_cnt = 0;
        struct vq_desc_extra *dxp = NULL;

        if (unlikely(num == 0))
                return;

        for (i = 0; i < num; i++) {
                dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
                free_cnt += dxp->ndescs;
                if (dxp->cookie != NULL) {
                        rte_pktmbuf_free(dxp->cookie);
                        dxp->cookie = NULL;
                }
        }

        vq->vq_free_cnt += free_cnt;
        vq->vq_used_cons_idx = idx;
}

#endif /* _VIRTQUEUE_H_ */