1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
10 #include <rte_atomic.h>
11 #include <rte_memory.h>
12 #include <rte_mempool.h>
15 #include "virtio_pci.h"
16 #include "virtio_ring.h"
17 #include "virtio_logs.h"
18 #include "virtio_rxtx.h"
22 #define DEFAULT_TX_FREE_THRESH 32
23 #define DEFAULT_RX_FREE_THRESH 32
25 #define VIRTIO_MBUF_BURST_SZ 64
27 * Per virtio_ring.h in Linux.
28 * For virtio_pci on SMP, we don't need to order with respect to MMIO
29 * accesses through relaxed memory I/O windows, so smp_mb() et al are
32 * For using virtio to talk to real devices (e.g. vDPA) we do need real
36 virtio_mb(uint8_t weak_barriers)
45 virtio_rmb(uint8_t weak_barriers)
54 virtio_wmb(uint8_t weak_barriers)
62 static inline uint16_t
63 virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
64 uint8_t weak_barriers)
69 /* x86 prefers using rte_io_rmb over __atomic_load_n as it reports
70 * better performance (~1.5%), which comes from the branch saved by the compiler.
71 * The if and else branches are identical on all platforms except Arm.
74 flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
88 virtqueue_store_flags_packed(struct vring_packed_desc *dp,
89 uint16_t flags, uint8_t weak_barriers)
92 /* x86 prefers using rte_io_wmb over __atomic_store_n as it reports
93 * better performance (~1.5%), which comes from the branch saved by the compiler.
94 * The if and else branches are identical on all platforms except Arm.
97 __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
108 #ifdef RTE_PMD_PACKET_PREFETCH
109 #define rte_packet_prefetch(p) rte_prefetch1(p)
111 #define rte_packet_prefetch(p) do {} while(0)
114 #define VIRTQUEUE_MAX_NAME_SZ 32
116 #ifdef RTE_VIRTIO_USER
118 * Return the physical address (or virtual address in case of
119 * virtio-user) of mbuf data buffer.
121 * The address is first cast to the word size (sizeof(uintptr_t))
122 * before being cast to uint64_t. This makes it work with any
123 * combination of word size (64-bit and 32-bit) and virtio device
124 * (virtio-pci and virtio-user).
126 #define VIRTIO_MBUF_ADDR(mb, vq) \
127 ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
129 #define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova)
133 * Return the physical address (or virtual address in case of
134 * virtio-user) of mbuf data buffer, taking care of mbuf data offset
136 #define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
137 (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
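/*
 * Example (illustrative sketch only, not part of this header): how the
 * RX/TX paths typically use the macro above to fill a split-ring
 * descriptor from an mbuf. "idx" and "m" are hypothetical locals.
 *
 *	struct vring_desc *desc = &vq->vq_split.ring.desc[idx];
 *
 *	desc->addr = VIRTIO_MBUF_DATA_DMA_ADDR(m, vq);
 *	desc->len = m->data_len;
 *	desc->flags = VRING_DESC_F_WRITE;
 *
 * (VRING_DESC_F_WRITE marks the buffer as device-writable, i.e. an RX buffer.)
 */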
139 #define VTNET_SQ_RQ_QUEUE_IDX 0
140 #define VTNET_SQ_TQ_QUEUE_IDX 1
141 #define VTNET_SQ_CQ_QUEUE_IDX 2
143 enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
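/*
 * Example (illustrative): with N queue pairs negotiated, the virtqueue
 * indices are laid out as
 *
 *	0 -> RQ0, 1 -> TQ0, 2 -> RQ1, 3 -> TQ1, ..., 2N -> CQ
 *
 * i.e. even indices below 2N are receive queues, odd indices are transmit
 * queues, and index 2N is the control queue (see virtio_get_queue_type()
 * below).
 */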
145 * The maximum virtqueue size is 2^15. Use that value as the end of
146 * descriptor chain terminator since it will never be a valid index
147 * in the descriptor table. This is used to verify we are correctly
148 * handling vq_free_cnt.
150 #define VQ_RING_DESC_CHAIN_END 32768
153 * Control the RX mode, i.e. promiscuous, allmulti, etc.
154 * All commands require an "out" sg entry containing a 1 byte
155 * state value, zero = disable, non-zero = enable. Commands
156 * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
157 * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
159 #define VIRTIO_NET_CTRL_RX 0
160 #define VIRTIO_NET_CTRL_RX_PROMISC 0
161 #define VIRTIO_NET_CTRL_RX_ALLMULTI 1
162 #define VIRTIO_NET_CTRL_RX_ALLUNI 2
163 #define VIRTIO_NET_CTRL_RX_NOMULTI 3
164 #define VIRTIO_NET_CTRL_RX_NOUNI 4
165 #define VIRTIO_NET_CTRL_RX_NOBCAST 5
170 * The MAC filter table is managed by the hypervisor; the guest should
171 * assume the size is infinite. Filtering should be considered
172 * non-perfect, i.e. based on hypervisor resources, the guest may
173 * receive packets from sources not specified in the filter list.
175 * In addition to the class/cmd header, the TABLE_SET command requires
176 * two out scatterlists. Each contains a 4 byte count of entries followed
177 * by a concatenated byte stream of the ETH_ALEN MAC addresses. The
178 * first sg list contains unicast addresses, the second is for multicast.
179 * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
182 * The ADDR_SET command requires one out scatterlist containing a
183 * 6-byte MAC address. This functionality is present if the
184 * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
186 struct virtio_net_ctrl_mac {
188 uint8_t macs[][RTE_ETHER_ADDR_LEN];
191 #define VIRTIO_NET_CTRL_MAC 1
192 #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
193 #define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
196 * Control VLAN filtering
198 * The VLAN filter table is controlled via a simple ADD/DEL interface.
199 * VLAN IDs not added may be filtered by the hypervisor. Del is the
200 * opposite of add. Both commands expect an out entry containing a 2
201 * byte VLAN ID. VLAN filtering is available with the
202 * VIRTIO_NET_F_CTRL_VLAN feature bit.
204 #define VIRTIO_NET_CTRL_VLAN 2
205 #define VIRTIO_NET_CTRL_VLAN_ADD 0
206 #define VIRTIO_NET_CTRL_VLAN_DEL 1
209 * Control link announce acknowledgement
211 * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
212 * the driver has received the notification; the device will clear the
213 * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
216 #define VIRTIO_NET_CTRL_ANNOUNCE 3
217 #define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0
219 struct virtio_net_ctrl_hdr {
224 typedef uint8_t virtio_net_ctrl_ack;
226 #define VIRTIO_NET_OK 0
227 #define VIRTIO_NET_ERR 1
229 #define VIRTIO_MAX_CTRL_DATA 2048
231 struct virtio_pmd_ctrl {
232 struct virtio_net_ctrl_hdr hdr;
233 virtio_net_ctrl_ack status;
234 uint8_t data[VIRTIO_MAX_CTRL_DATA];
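/*
 * Example (illustrative sketch, not part of the driver API): how a control
 * command such as "enable promiscuous mode" might be laid out before being
 * sent on the control queue. "ctrl" and "on" are hypothetical locals, and
 * the usual class/cmd fields of struct virtio_net_ctrl_hdr are assumed.
 *
 *	struct virtio_pmd_ctrl ctrl;
 *	uint8_t on = 1;
 *
 *	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
 *	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
 *	memcpy(ctrl.data, &on, sizeof(on));
 *
 * The device writes VIRTIO_NET_OK or VIRTIO_NET_ERR back into ctrl.status.
 */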
237 struct vq_desc_extra {
244 struct virtio_hw *hw; /**< virtio_hw structure pointer. */
247 /**< vring keeping desc, used and avail */
252 /**< vring keeping descs and events */
253 struct vring_packed ring;
254 bool used_wrap_counter;
255 uint16_t cached_flags; /**< cached flags for descs */
256 uint16_t event_flags_shadow;
260 uint16_t vq_used_cons_idx; /**< last consumed descriptor */
261 uint16_t vq_nentries; /**< vring desc numbers */
262 uint16_t vq_free_cnt; /**< num of desc available */
263 uint16_t vq_avail_idx; /**< sync until needed */
264 uint16_t vq_free_thresh; /**< free threshold */
266 void *vq_ring_virt_mem; /**< linear address of vring*/
267 unsigned int vq_ring_size;
270 struct virtnet_rx rxq;
271 struct virtnet_tx txq;
272 struct virtnet_ctl cq;
275 rte_iova_t vq_ring_mem; /**< physical address of vring,
276 * or virtual address for virtio_user. */
279 * Head of the free chain in the descriptor table. If
280 * there are no free descriptors, this will be set to
281 * VQ_RING_DESC_CHAIN_END.
283 uint16_t vq_desc_head_idx;
284 uint16_t vq_desc_tail_idx;
285 uint16_t vq_queue_index; /**< PCI queue index */
286 uint16_t offset; /**< relative offset to obtain addr in mbuf */
287 uint16_t *notify_addr;
288 struct rte_mbuf **sw_ring; /**< RX software ring. */
289 struct vq_desc_extra vq_descx[0];
292 /* If multiqueue is provided by the host, then we support it. */
293 #define VIRTIO_NET_CTRL_MQ 4
294 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
295 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
296 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
299 * This is the first element of the scatter-gather list. If you don't
300 * specify GSO or CSUM features, you can simply ignore the header.
302 struct virtio_net_hdr {
303 #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /**< Use csum_start,csum_offset*/
304 #define VIRTIO_NET_HDR_F_DATA_VALID 2 /**< Checksum is valid */
306 #define VIRTIO_NET_HDR_GSO_NONE 0 /**< Not a GSO frame */
307 #define VIRTIO_NET_HDR_GSO_TCPV4 1 /**< GSO frame, IPv4 TCP (TSO) */
308 #define VIRTIO_NET_HDR_GSO_UDP 3 /**< GSO frame, IPv4 UDP (UFO) */
309 #define VIRTIO_NET_HDR_GSO_TCPV6 4 /**< GSO frame, IPv6 TCP */
310 #define VIRTIO_NET_HDR_GSO_ECN 0x80 /**< TCP has ECN set */
312 uint16_t hdr_len; /**< Ethernet + IP + tcp/udp hdrs */
313 uint16_t gso_size; /**< Bytes to append to hdr_len per frame */
314 uint16_t csum_start; /**< Position to start checksumming from */
315 uint16_t csum_offset; /**< Offset after that to place checksum */
319 * This is the version of the header to use when the MRG_RXBUF
320 * feature has been negotiated.
322 struct virtio_net_hdr_mrg_rxbuf {
323 struct virtio_net_hdr hdr;
324 uint16_t num_buffers; /**< Number of merged rx buffers */
327 /* Region reserved to allow for transmit header and indirect ring */
328 #define VIRTIO_MAX_TX_INDIRECT 8
329 struct virtio_tx_region {
330 struct virtio_net_hdr_mrg_rxbuf tx_hdr;
332 struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT];
333 struct vring_packed_desc
334 tx_packed_indir[VIRTIO_MAX_TX_INDIRECT];
339 desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
341 uint16_t used, avail, flags;
343 flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
344 used = !!(flags & VRING_PACKED_DESC_F_USED);
345 avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
347 return avail == used && used == vq->vq_packed.used_wrap_counter;
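/*
 * Example (illustrative): the cleanup paths below poll desc_is_used() on the
 * descriptor at vq_used_cons_idx; a caller that must block could simply spin
 * on it, e.g.
 *
 *	while (!desc_is_used(&vq->vq_packed.ring.desc[vq->vq_used_cons_idx], vq))
 *		rte_pause();
 */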
351 vring_desc_init_packed(struct virtqueue *vq, int n)
354 for (i = 0; i < n - 1; i++) {
355 vq->vq_packed.ring.desc[i].id = i;
356 vq->vq_descx[i].next = i + 1;
358 vq->vq_packed.ring.desc[i].id = i;
359 vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
362 /* Chain all the descriptors in the ring with an END */
364 vring_desc_init_split(struct vring_desc *dp, uint16_t n)
368 for (i = 0; i < n - 1; i++)
369 dp[i].next = (uint16_t)(i + 1);
370 dp[i].next = VQ_RING_DESC_CHAIN_END;
374 vring_desc_init_indirect_packed(struct vring_packed_desc *dp, int n)
377 for (i = 0; i < n; i++) {
378 dp[i].id = (uint16_t)i;
379 dp[i].flags = VRING_DESC_F_WRITE;
384 * Tell the backend not to interrupt us. Implementation for packed virtqueues.
387 virtqueue_disable_intr_packed(struct virtqueue *vq)
389 if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
390 vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
391 vq->vq_packed.ring.driver->desc_event_flags =
392 vq->vq_packed.event_flags_shadow;
397 * Tell the backend not to interrupt us. Implementation for split virtqueues.
400 virtqueue_disable_intr_split(struct virtqueue *vq)
402 vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
406 * Tell the backend not to interrupt us.
409 virtqueue_disable_intr(struct virtqueue *vq)
411 if (vtpci_packed_queue(vq->hw))
412 virtqueue_disable_intr_packed(vq);
414 virtqueue_disable_intr_split(vq);
418 * Tell the backend to interrupt us. Implementation for packed virtqueues.
421 virtqueue_enable_intr_packed(struct virtqueue *vq)
423 if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
424 vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
425 vq->vq_packed.ring.driver->desc_event_flags =
426 vq->vq_packed.event_flags_shadow;
431 * Tell the backend to interrupt us. Implementation for split virtqueues.
434 virtqueue_enable_intr_split(struct virtqueue *vq)
436 vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
440 * Tell the backend to interrupt us.
443 virtqueue_enable_intr(struct virtqueue *vq)
445 if (vtpci_packed_queue(vq->hw))
446 virtqueue_enable_intr_packed(vq);
448 virtqueue_enable_intr_split(vq);
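/*
 * Example (illustrative): a typical RX interrupt sequence driven from the
 * ethdev layer is
 *
 *	virtqueue_enable_intr(vq);
 *	(wait for the queue interrupt / epoll event)
 *	virtqueue_disable_intr(vq);
 *	(poll the ring)
 */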
452 * Dump virtqueue internal structures, for debug purposes only.
454 void virtqueue_dump(struct virtqueue *vq);
456 * Get all mbufs to be freed.
458 struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
460 /* Flush the elements in the used ring. */
461 void virtqueue_rxvq_flush(struct virtqueue *vq);
463 int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
465 int virtqueue_txvq_reset_packed(struct virtqueue *vq);
468 virtqueue_full(const struct virtqueue *vq)
470 return vq->vq_free_cnt == 0;
474 virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
476 if (vtpci_queue_idx == hw->max_queue_pairs * 2)
478 else if (vtpci_queue_idx % 2 == 0)
484 /* virtqueue_nused has a load-acquire or rte_io_rmb inside */
485 static inline uint16_t
486 virtqueue_nused(const struct virtqueue *vq)
490 if (vq->hw->weak_barriers) {
492 * x86 prefers using rte_smp_rmb over __atomic_load_n as it
493 * reports slightly better performance, which comes from the
494 * branch saved by the compiler.
495 * The if and else branches are identical with the smp and io
496 * barriers both defined as compiler barriers on x86.
498 #ifdef RTE_ARCH_X86_64
499 idx = vq->vq_split.ring.used->idx;
502 idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,
506 idx = vq->vq_split.ring.used->idx;
509 return idx - vq->vq_used_cons_idx;
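/*
 * Example (illustrative): the split-ring receive path caps its burst by the
 * number of used entries reported here. "nb_pkts" is a hypothetical
 * parameter, not a name from this header.
 *
 *	uint16_t num = virtqueue_nused(vq);
 *
 *	num = RTE_MIN(num, nb_pkts);
 *	num = RTE_MIN(num, VIRTIO_MBUF_BURST_SZ);
 */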
512 void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
513 void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
514 void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
518 vq_update_avail_idx(struct virtqueue *vq)
520 if (vq->hw->weak_barriers) {
521 /* x86 prefers using rte_smp_wmb over __atomic_store_n as
522 * it reports slightly better performance, which comes from
523 * the branch saved by the compiler.
524 * The if and else branches are identical with the smp and
525 * io barriers both defined as compiler barriers on x86.
527 #ifdef RTE_ARCH_X86_64
529 vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
531 __atomic_store_n(&vq->vq_split.ring.avail->idx,
532 vq->vq_avail_idx, __ATOMIC_RELEASE);
536 vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
541 vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
545 * Place the head of the descriptor chain into the next slot and make
546 * it usable to the host. The chain is made available now rather than
547 * deferring to virtqueue_notify() in the hopes that if the host is
548 * currently running on another CPU, we can keep it processing the new
551 avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
552 if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))
553 vq->vq_split.ring.avail->ring[avail_idx] = desc_idx;
558 virtqueue_kick_prepare(struct virtqueue *vq)
561 * Ensure updated avail->idx is visible to vhost before reading
564 virtio_mb(vq->hw->weak_barriers);
565 return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
569 virtqueue_kick_prepare_packed(struct virtqueue *vq)
574 * Ensure updated data is visible to vhost before reading the flags.
576 virtio_mb(vq->hw->weak_barriers);
577 flags = vq->vq_packed.ring.device->desc_event_flags;
579 return flags != RING_EVENT_FLAGS_DISABLE;
583 * virtqueue_kick_prepare*() or the virtio_wmb() should be called
584 * before this function to be sure that all the data is visible to vhost.
587 virtqueue_notify(struct virtqueue *vq)
589 VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
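/*
 * Example (illustrative sketch): the canonical split-ring submit/notify
 * sequence used by the transmit path, where "desc_idx" is assumed to be the
 * head of a descriptor chain that has just been filled in.
 *
 *	vq_update_avail_ring(vq, desc_idx);
 *	vq_update_avail_idx(vq);
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 */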
592 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
593 #define VIRTQUEUE_DUMP(vq) do { \
594 uint16_t used_idx, nused; \
595 used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
597 nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
598 if (vtpci_packed_queue((vq)->hw)) { \
599 PMD_INIT_LOG(DEBUG, \
600 "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
601 " cached_flags=0x%x; used_wrap_counter=%d", \
602 (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
603 (vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
604 (vq)->vq_packed.used_wrap_counter); \
607 PMD_INIT_LOG(DEBUG, \
608 "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
609 " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
610 " avail.flags=0x%x; used.flags=0x%x", \
611 (vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
612 (vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
613 __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
614 (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
617 #define VIRTQUEUE_DUMP(vq) do { } while (0)
620 /* avoid the write operation when it is unnecessary, to lessen cache issues */
621 #define ASSIGN_UNLESS_EQUAL(var, val) do { \
622 typeof(var) *const var_ = &(var); \
623 typeof(val) const val_ = (val); \
628 #define virtqueue_clear_net_hdr(hdr) do { \
629 typeof(hdr) hdr_ = (hdr); \
630 ASSIGN_UNLESS_EQUAL((hdr_)->csum_start, 0); \
631 ASSIGN_UNLESS_EQUAL((hdr_)->csum_offset, 0); \
632 ASSIGN_UNLESS_EQUAL((hdr_)->flags, 0); \
633 ASSIGN_UNLESS_EQUAL((hdr_)->gso_type, 0); \
634 ASSIGN_UNLESS_EQUAL((hdr_)->gso_size, 0); \
635 ASSIGN_UNLESS_EQUAL((hdr_)->hdr_len, 0); \
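/*
 * Example (illustrative): ASSIGN_UNLESS_EQUAL() only performs the store when
 * the value actually changes, so
 *
 *	ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
 *
 * behaves like
 *
 *	if (hdr->flags != 0)
 *		hdr->flags = 0;
 *
 * keeping the (possibly device-shared) cache line clean when the header is
 * already zeroed.
 */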
639 virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
640 struct rte_mbuf *cookie,
644 if (cookie->ol_flags & PKT_TX_TCP_SEG)
645 cookie->ol_flags |= PKT_TX_TCP_CKSUM;
647 switch (cookie->ol_flags & PKT_TX_L4_MASK) {
648 case PKT_TX_UDP_CKSUM:
649 hdr->csum_start = cookie->l2_len + cookie->l3_len;
650 hdr->csum_offset = offsetof(struct rte_udp_hdr,
652 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
655 case PKT_TX_TCP_CKSUM:
656 hdr->csum_start = cookie->l2_len + cookie->l3_len;
657 hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
658 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
662 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
663 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
664 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
668 /* TCP Segmentation Offload */
669 if (cookie->ol_flags & PKT_TX_TCP_SEG) {
670 hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
671 VIRTIO_NET_HDR_GSO_TCPV6 :
672 VIRTIO_NET_HDR_GSO_TCPV4;
673 hdr->gso_size = cookie->tso_segsz;
679 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
680 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
681 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
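/*
 * Example (illustrative): for the checksum branch above to trigger, the
 * application must set the usual DPDK TX offload metadata on the mbuf before
 * calling the transmit burst function, e.g. for TCP checksum offload over
 * IPv4:
 *
 *	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_TCP_CKSUM;
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 */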
687 virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
688 uint16_t needed, int use_indirect, int can_push,
691 struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
692 struct vq_desc_extra *dxp;
693 struct virtqueue *vq = txvq->vq;
694 struct vring_packed_desc *start_dp, *head_dp;
695 uint16_t idx, id, head_idx, head_flags;
696 int16_t head_size = vq->hw->vtnet_hdr_size;
697 struct virtio_net_hdr *hdr;
699 bool prepend_header = false;
700 uint16_t seg_num = cookie->nb_segs;
702 id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
704 dxp = &vq->vq_descx[id];
705 dxp->ndescs = needed;
706 dxp->cookie = cookie;
708 head_idx = vq->vq_avail_idx;
711 start_dp = vq->vq_packed.ring.desc;
713 head_dp = &vq->vq_packed.ring.desc[idx];
714 head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
715 head_flags |= vq->vq_packed.cached_flags;
718 /* prepend cannot fail, checked by caller */
719 hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
721 prepend_header = true;
723 /* if offload is disabled, the header is not zeroed below, so do it now */
724 if (!vq->hw->has_tx_offload)
725 virtqueue_clear_net_hdr(hdr);
726 } else if (use_indirect) {
727 /* setup tx ring slot to point to indirect
728 * descriptor list stored in reserved region.
730 * the first slot in indirect ring is already preset
731 * to point to the header in reserved region
733 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
734 RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
735 start_dp[idx].len = (seg_num + 1) *
736 sizeof(struct vring_packed_desc);
737 /* reset flags for indirect desc */
738 head_flags = VRING_DESC_F_INDIRECT;
739 head_flags |= vq->vq_packed.cached_flags;
740 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
742 /* loop below will fill in rest of the indirect elements */
743 start_dp = txr[idx].tx_packed_indir;
746 /* setup first tx ring slot to point to header
747 * stored in reserved region.
749 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
750 RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
751 start_dp[idx].len = vq->hw->vtnet_hdr_size;
752 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
754 if (idx >= vq->vq_nentries) {
755 idx -= vq->vq_nentries;
756 vq->vq_packed.cached_flags ^=
757 VRING_PACKED_DESC_F_AVAIL_USED;
761 virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
766 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
767 start_dp[idx].len = cookie->data_len;
768 if (prepend_header) {
769 start_dp[idx].addr -= head_size;
770 start_dp[idx].len += head_size;
771 prepend_header = false;
774 if (likely(idx != head_idx)) {
775 flags = cookie->next ? VRING_DESC_F_NEXT : 0;
776 flags |= vq->vq_packed.cached_flags;
777 start_dp[idx].flags = flags;
781 if (idx >= vq->vq_nentries) {
782 idx -= vq->vq_nentries;
783 vq->vq_packed.cached_flags ^=
784 VRING_PACKED_DESC_F_AVAIL_USED;
786 } while ((cookie = cookie->next) != NULL);
788 start_dp[prev].id = id;
792 if (++idx >= vq->vq_nentries) {
793 idx -= vq->vq_nentries;
794 vq->vq_packed.cached_flags ^=
795 VRING_PACKED_DESC_F_AVAIL_USED;
799 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
800 vq->vq_avail_idx = idx;
803 vq->vq_desc_head_idx = dxp->next;
804 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
805 vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
808 virtqueue_store_flags_packed(head_dp, head_flags,
809 vq->hw->weak_barriers);
813 vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
815 struct vq_desc_extra *dxp;
817 dxp = &vq->vq_descx[id];
818 vq->vq_free_cnt += dxp->ndescs;
820 if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
821 vq->vq_desc_head_idx = id;
823 vq->vq_descx[vq->vq_desc_tail_idx].next = id;
825 vq->vq_desc_tail_idx = id;
826 dxp->next = VQ_RING_DESC_CHAIN_END;
830 virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
832 uint16_t used_idx, id, curr_id, free_cnt = 0;
833 uint16_t size = vq->vq_nentries;
834 struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
835 struct vq_desc_extra *dxp;
837 used_idx = vq->vq_used_cons_idx;
838 /* desc_is_used has a load-acquire or rte_io_rmb inside
839 * and is used here to wait for used descs in the virtqueue.
841 while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
842 id = desc[used_idx].id;
845 dxp = &vq->vq_descx[used_idx];
846 used_idx += dxp->ndescs;
847 free_cnt += dxp->ndescs;
849 if (used_idx >= size) {
851 vq->vq_packed.used_wrap_counter ^= 1;
853 if (dxp->cookie != NULL) {
854 rte_pktmbuf_free(dxp->cookie);
857 } while (curr_id != id);
859 vq->vq_used_cons_idx = used_idx;
860 vq->vq_free_cnt += free_cnt;
864 virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
866 uint16_t used_idx, id;
867 uint16_t size = vq->vq_nentries;
868 struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
869 struct vq_desc_extra *dxp;
871 used_idx = vq->vq_used_cons_idx;
872 /* desc_is_used has a load-acquire or rte_io_rmb inside
873 * and is used here to wait for used descs in the virtqueue.
875 while (num-- && desc_is_used(&desc[used_idx], vq)) {
876 id = desc[used_idx].id;
877 dxp = &vq->vq_descx[id];
878 vq->vq_used_cons_idx += dxp->ndescs;
879 if (vq->vq_used_cons_idx >= size) {
880 vq->vq_used_cons_idx -= size;
881 vq->vq_packed.used_wrap_counter ^= 1;
883 vq_ring_free_id_packed(vq, id);
884 if (dxp->cookie != NULL) {
885 rte_pktmbuf_free(dxp->cookie);
888 used_idx = vq->vq_used_cons_idx;
892 /* Cleanup from completed transmits. */
894 virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
897 virtio_xmit_cleanup_inorder_packed(vq, num);
899 virtio_xmit_cleanup_normal_packed(vq, num);
903 virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
905 uint16_t i, used_idx, desc_idx;
906 for (i = 0; i < num; i++) {
907 struct vring_used_elem *uep;
908 struct vq_desc_extra *dxp;
910 used_idx = (uint16_t)(vq->vq_used_cons_idx &
911 (vq->vq_nentries - 1));
912 uep = &vq->vq_split.ring.used->ring[used_idx];
914 desc_idx = (uint16_t)uep->id;
915 dxp = &vq->vq_descx[desc_idx];
916 vq->vq_used_cons_idx++;
917 vq_ring_free_chain(vq, desc_idx);
919 if (dxp->cookie != NULL) {
920 rte_pktmbuf_free(dxp->cookie);
926 /* Cleanup from completed inorder transmits. */
927 static __rte_always_inline void
928 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
930 uint16_t i, idx = vq->vq_used_cons_idx;
931 int16_t free_cnt = 0;
932 struct vq_desc_extra *dxp = NULL;
934 if (unlikely(num == 0))
937 for (i = 0; i < num; i++) {
938 dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
939 free_cnt += dxp->ndescs;
940 if (dxp->cookie != NULL) {
941 rte_pktmbuf_free(dxp->cookie);
946 vq->vq_free_cnt += free_cnt;
947 vq->vq_used_cons_idx = idx;
949 #endif /* _VIRTQUEUE_H_ */