/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _VIRTQUEUE_H_
#define _VIRTQUEUE_H_
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_mempool.h>

#include "virtio_pci.h"
#include "virtio_ring.h"
#include "virtio_logs.h"
#include "virtio_rxtx.h"
#define DEFAULT_TX_FREE_THRESH 32
#define DEFAULT_RX_FREE_THRESH 32

#define VIRTIO_MBUF_BURST_SZ 64
/*
 * Per virtio_ring.h in Linux.
 *     For virtio_pci on SMP, we don't need to order with respect to MMIO
 *     accesses through relaxed memory I/O windows, so smp_mb() et al are
 *     sufficient.
 *
 *     For using virtio to talk to real devices (e.g. vDPA) we do need real
 *     barriers.
 */
static inline void
virtio_mb(uint8_t weak_barriers)
{
	if (weak_barriers)
		rte_smp_mb();
	else
		rte_cio_mb();
}

static inline void
virtio_rmb(uint8_t weak_barriers)
{
	if (weak_barriers)
		rte_smp_rmb();
	else
		rte_cio_rmb();
}

static inline void
virtio_wmb(uint8_t weak_barriers)
{
	if (weak_barriers)
		rte_smp_wmb();
	else
		rte_cio_wmb();
}
static inline uint16_t
virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
			      uint8_t weak_barriers)
{
	uint16_t flags;

	if (weak_barriers) {
/* x86 prefers rte_smp_rmb over __atomic_load_n as it reports better
 * performance (~1.5%), which comes from the branch saved by the compiler.
 * The if and else branches are identical, with the smp and cio barriers
 * both defined as compiler barriers on x86.
 */
#ifdef RTE_ARCH_X86_64
		flags = dp->flags;
		rte_smp_rmb();
#else
		flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
#endif
	} else {
		flags = dp->flags;
		rte_cio_rmb();
	}

	return flags;
}
static inline void
virtqueue_store_flags_packed(struct vring_packed_desc *dp,
			      uint16_t flags, uint8_t weak_barriers)
{
	if (weak_barriers) {
/* x86 prefers rte_smp_wmb over __atomic_store_n as it reports better
 * performance (~1.5%), which comes from the branch saved by the compiler.
 * The if and else branches are identical, with the smp and cio barriers
 * both defined as compiler barriers on x86.
 */
#ifdef RTE_ARCH_X86_64
		rte_smp_wmb();
		dp->flags = flags;
#else
		__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
#endif
	} else {
		rte_cio_wmb();
		dp->flags = flags;
	}
}
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)  rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)  do {} while(0)
#endif
#define VIRTQUEUE_MAX_NAME_SZ 32
#ifdef RTE_VIRTIO_USER
/**
 * Return the physical address (or virtual address in case of
 * virtio-user) of mbuf data buffer.
 *
 * The address is first cast to the word size (sizeof(uintptr_t))
 * before being cast to uint64_t. This is to make it work with different
 * combinations of word size (64 bit and 32 bit) and virtio device
 * (virtio-pci and virtio-user).
 */
#define VIRTIO_MBUF_ADDR(mb, vq) \
	((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
#else
#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova)
#endif

/**
 * Return the physical address (or virtual address in case of
 * virtio-user) of mbuf data buffer, taking care of mbuf data offset
 */
#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
	(VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
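
/*
 * Usage sketch (illustrative only, not taken from the driver itself):
 * filling one split-ring descriptor from an mbuf with the macro above.
 * "vq", "desc_idx" and "m" are assumed to be a valid virtqueue, a free
 * descriptor index and a valid mbuf respectively.
 *
 *	struct vring_desc *dp = &vq->vq_split.ring.desc[desc_idx];
 *
 *	dp->addr  = VIRTIO_MBUF_DATA_DMA_ADDR(m, vq);
 *	dp->len   = m->data_len;
 *	dp->flags = VRING_DESC_F_WRITE;	// device-writable, i.e. an RX buffer
 */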
#define VTNET_SQ_RQ_QUEUE_IDX 0
#define VTNET_SQ_TQ_QUEUE_IDX 1
#define VTNET_SQ_CQ_QUEUE_IDX 2

enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
/**
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768
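
/*
 * Illustrative sketch (assumptions noted): the driver-side free list is
 * chained through vq_desc_extra.next and terminated with
 * VQ_RING_DESC_CHAIN_END, so it can be walked like this, where "vq" is a
 * valid virtqueue and the walk starts at vq->vq_desc_head_idx:
 *
 *	uint16_t idx = vq->vq_desc_head_idx;
 *	uint16_t nb_free = 0;
 *
 *	while (idx != VQ_RING_DESC_CHAIN_END) {
 *		nb_free++;
 *		idx = vq->vq_descx[idx].next;
 *	}
 *	// nb_free should normally match vq->vq_free_cnt
 */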
/**
 * Control the RX mode, i.e. promiscuous, allmulti, etc...
 * All commands require an "out" sg entry containing a 1 byte
 * state value, zero = disable, non-zero = enable. Commands
 * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
 * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
 */
#define VIRTIO_NET_CTRL_RX              0
#define VIRTIO_NET_CTRL_RX_PROMISC      0
#define VIRTIO_NET_CTRL_RX_ALLMULTI     1
#define VIRTIO_NET_CTRL_RX_ALLUNI       2
#define VIRTIO_NET_CTRL_RX_NOMULTI      3
#define VIRTIO_NET_CTRL_RX_NOUNI        4
#define VIRTIO_NET_CTRL_RX_NOBCAST      5
/**
 * Control the MAC
 *
 * The MAC filter table is managed by the hypervisor, the guest should
 * assume the size is infinite. Filtering should be considered
 * non-perfect, i.e. based on hypervisor resources, the guest may
 * receive packets from sources not specified in the filter list.
 *
 * In addition to the class/cmd header, the TABLE_SET command requires
 * two out scatterlists. Each contains a 4 byte count of entries followed
 * by a concatenated byte stream of the ETH_ALEN MAC addresses. The
 * first sg list contains unicast addresses, the second is for multicast.
 * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
 * is available.
 *
 * The ADDR_SET command requires one out scatterlist, which contains a
 * 6 byte MAC address. This functionality is present if the
 * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
 */
struct virtio_net_ctrl_mac {
	uint32_t entries;
	uint8_t macs[][RTE_ETHER_ADDR_LEN];
} __attribute__((__packed__));

#define VIRTIO_NET_CTRL_MAC    1
#define VIRTIO_NET_CTRL_MAC_TABLE_SET        0
#define VIRTIO_NET_CTRL_MAC_ADDR_SET         1
/**
 * Control VLAN filtering
 *
 * The VLAN filter table is controlled via a simple ADD/DEL interface.
 * VLAN IDs not added may be filtered by the hypervisor. Del is the
 * opposite of add. Both commands expect an out entry containing a 2
 * byte VLAN ID. VLAN filtering is available with the
 * VIRTIO_NET_F_CTRL_VLAN feature bit.
 */
#define VIRTIO_NET_CTRL_VLAN     2
#define VIRTIO_NET_CTRL_VLAN_ADD 0
#define VIRTIO_NET_CTRL_VLAN_DEL 1
/**
 * Control link announce acknowledgement
 *
 * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that the
 * driver has received the notification; the device will clear the
 * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
 * this command.
 */
#define VIRTIO_NET_CTRL_ANNOUNCE     3
#define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0
struct virtio_net_ctrl_hdr {
	uint8_t class;
	uint8_t cmd;
} __attribute__((__packed__));

typedef uint8_t virtio_net_ctrl_ack;

#define VIRTIO_NET_OK     0
#define VIRTIO_NET_ERR    1

#define VIRTIO_MAX_CTRL_DATA 2048
struct virtio_pmd_ctrl {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	uint8_t data[VIRTIO_MAX_CTRL_DATA];
};
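
/*
 * Usage sketch (illustrative only): building a control command that turns
 * promiscuous mode on. How the command is posted on the control queue is
 * outside this header; "send_ctrl" below stands for a hypothetical helper.
 *
 *	struct virtio_pmd_ctrl ctrl;
 *	uint8_t on = 1;
 *
 *	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
 *	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
 *	memcpy(ctrl.data, &on, sizeof(on));
 *	// ret = send_ctrl(hw, &ctrl, sizeof(on));  hypothetical transport
 */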
struct vq_desc_extra {
	void *cookie;
	uint16_t ndescs;
	uint16_t next;
};

struct virtqueue {
	struct virtio_hw *hw; /**< virtio_hw structure pointer. */
	union {
		struct {
			/**< vring keeping desc, used and avail */
			struct vring ring;
		} vq_split;

		struct {
			/**< vring keeping descs and events */
			struct vring_packed ring;
			bool used_wrap_counter;
			uint16_t cached_flags; /**< cached flags for descs */
			uint16_t event_flags_shadow;
		} vq_packed;
	};

	uint16_t vq_used_cons_idx; /**< last consumed descriptor */
	uint16_t vq_nentries;  /**< vring desc numbers */
	uint16_t vq_free_cnt;  /**< num of desc available */
	uint16_t vq_avail_idx; /**< sync until needed */
	uint16_t vq_free_thresh; /**< free threshold */

	void *vq_ring_virt_mem;  /**< linear address of vring*/
	unsigned int vq_ring_size;

	union {
		struct virtnet_rx rxq;
		struct virtnet_tx txq;
		struct virtnet_ctl cq;
	};

	rte_iova_t vq_ring_mem; /**< physical address of vring,
	                         * or virtual address for virtio_user. */

	/**
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t vq_desc_head_idx;
	uint16_t vq_desc_tail_idx;
	uint16_t vq_queue_index;   /**< PCI queue index */
	uint16_t offset; /**< relative offset to obtain addr in mbuf */
	uint16_t *notify_addr;
	struct rte_mbuf **sw_ring;  /**< RX software ring. */
	struct vq_desc_extra vq_descx[0];
};
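
/*
 * Sizing note (illustrative sketch, allocator and flags are assumptions):
 * vq_descx[] is a flexible array, so a queue is allocated with room for one
 * vq_desc_extra per ring entry, along the lines of:
 *
 *	size_t sz = sizeof(struct virtqueue) +
 *		    nentries * sizeof(struct vq_desc_extra);
 *	struct virtqueue *vq = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 */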
/* If multiqueue is provided by host, then we support it. */
#define VIRTIO_NET_CTRL_MQ   4
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET        0
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN        1
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX        0x8000
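
/*
 * Usage sketch (illustrative only): the VQ_PAIRS_SET command carries the
 * requested number of queue pairs as a 16-bit "out" payload; "nb_pairs" is
 * assumed to lie within [VQ_PAIRS_MIN, VQ_PAIRS_MAX].
 *
 *	struct virtio_pmd_ctrl ctrl;
 *	uint16_t nb_pairs = 4;
 *
 *	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
 *	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
 *	memcpy(ctrl.data, &nb_pairs, sizeof(nb_pairs));
 */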
/**
 * This is the first element of the scatter-gather list. If you don't
 * specify GSO or CSUM features, you can simply ignore the header.
 */
struct virtio_net_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1    /**< Use csum_start,csum_offset*/
#define VIRTIO_NET_HDR_F_DATA_VALID 2    /**< Checksum is valid */
	uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE     0    /**< Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4    1    /**< GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP      3    /**< GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6    4    /**< GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_ECN      0x80 /**< TCP has ECN set */
	uint8_t gso_type;
	uint16_t hdr_len;     /**< Ethernet + IP + tcp/udp hdrs */
	uint16_t gso_size;    /**< Bytes to append to hdr_len per frame */
	uint16_t csum_start;  /**< Position to start checksumming from */
	uint16_t csum_offset; /**< Offset after that to place checksum */
};
/**
 * This is the version of the header to use when the MRG_RXBUF
 * feature has been negotiated.
 */
struct virtio_net_hdr_mrg_rxbuf {
	struct virtio_net_hdr hdr;
	uint16_t num_buffers; /**< Number of merged rx buffers */
};
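
/*
 * Illustrative note: with MRG_RXBUF negotiated, the receive path reads
 * num_buffers from the header that precedes the packet data to learn how
 * many descriptors make up one packet. "hdr_addr" is an assumed pointer to
 * that header inside the first receive buffer.
 *
 *	const struct virtio_net_hdr_mrg_rxbuf *h = hdr_addr;
 *	uint16_t seg_num = h->num_buffers;	// buffers holding this packet
 */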
/* Region reserved to allow for transmit header and indirect ring */
#define VIRTIO_MAX_TX_INDIRECT 8
struct virtio_tx_region {
	struct virtio_net_hdr_mrg_rxbuf tx_hdr;
	struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
		__attribute__((__aligned__(16)));
};
static inline int
desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
{
	uint16_t used, avail, flags;

	flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
	used = !!(flags & VRING_PACKED_DESC_F_USED);
	avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);

	return avail == used && used == vq->vq_packed.used_wrap_counter;
}
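
/*
 * Usage sketch (illustrative only): polling for the next descriptor the
 * device is expected to mark as used on a packed queue, where "vq" is a
 * valid packed virtqueue.
 *
 *	struct vring_packed_desc *dp =
 *		&vq->vq_packed.ring.desc[vq->vq_used_cons_idx];
 *
 *	if (desc_is_used(dp, vq)) {
 *		// dp->id identifies the buffer; see the cleanup helpers below
 *	}
 */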
static inline void
vring_desc_init_packed(struct virtqueue *vq, int n)
{
	int i;

	for (i = 0; i < n - 1; i++) {
		vq->vq_packed.ring.desc[i].id = i;
		vq->vq_descx[i].next = i + 1;
	}
	vq->vq_packed.ring.desc[i].id = i;
	vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
}
/* Chain all the descriptors in the ring with an END */
static inline void
vring_desc_init_split(struct vring_desc *dp, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n - 1; i++)
		dp[i].next = (uint16_t)(i + 1);
	dp[i].next = VQ_RING_DESC_CHAIN_END;
}
/**
 * Tell the backend not to interrupt us. Implementation for packed virtqueues.
 */
static inline void
virtqueue_disable_intr_packed(struct virtqueue *vq)
{
	if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
		vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
		vq->vq_packed.ring.driver->desc_event_flags =
			vq->vq_packed.event_flags_shadow;
	}
}
/**
 * Tell the backend not to interrupt us. Implementation for split virtqueues.
 */
static inline void
virtqueue_disable_intr_split(struct virtqueue *vq)
{
	vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
/**
 * Tell the backend not to interrupt us.
 */
static inline void
virtqueue_disable_intr(struct virtqueue *vq)
{
	if (vtpci_packed_queue(vq->hw))
		virtqueue_disable_intr_packed(vq);
	else
		virtqueue_disable_intr_split(vq);
}
/**
 * Tell the backend to interrupt us. Implementation for packed virtqueues.
 */
static inline void
virtqueue_enable_intr_packed(struct virtqueue *vq)
{
	if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
		vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
		vq->vq_packed.ring.driver->desc_event_flags =
			vq->vq_packed.event_flags_shadow;
	}
}
/**
 * Tell the backend to interrupt us. Implementation for split virtqueues.
 */
static inline void
virtqueue_enable_intr_split(struct virtqueue *vq)
{
	vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
}
/**
 * Tell the backend to interrupt us.
 */
static inline void
virtqueue_enable_intr(struct virtqueue *vq)
{
	if (vtpci_packed_queue(vq->hw))
		virtqueue_enable_intr_packed(vq);
	else
		virtqueue_enable_intr_split(vq);
}
/**
 * Dump virtqueue internal structures, for debug purpose only.
 */
void virtqueue_dump(struct virtqueue *vq);

/**
 * Get all mbufs to be freed.
 */
struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);

/* Flush the elements in the used ring. */
void virtqueue_rxvq_flush(struct virtqueue *vq);

int virtqueue_rxvq_reset_packed(struct virtqueue *vq);

int virtqueue_txvq_reset_packed(struct virtqueue *vq);
static inline int
virtqueue_full(const struct virtqueue *vq)
{
	return vq->vq_free_cnt == 0;
}
static inline int
virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
{
	if (vtpci_queue_idx == hw->max_queue_pairs * 2)
		return VTNET_CQ;
	else if (vtpci_queue_idx % 2 == 0)
		return VTNET_RQ;
	else
		return VTNET_TQ;
}
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_split.ring.used->idx - \
					(vq)->vq_used_cons_idx))
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
			  uint16_t num);
static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
	virtio_wmb(vq->hw->weak_barriers);
	vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
}
static inline void
vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;
	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
	if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))
		vq->vq_split.ring.avail->ring[avail_idx] = desc_idx;
	vq->vq_avail_idx++;
}
static inline int
virtqueue_kick_prepare(struct virtqueue *vq)
{
	/*
	 * Ensure updated avail->idx is visible to vhost before reading
	 * the used->flags.
	 */
	virtio_mb(vq->hw->weak_barriers);
	return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
}
static inline int
virtqueue_kick_prepare_packed(struct virtqueue *vq)
{
	uint16_t flags;

	/*
	 * Ensure updated data is visible to vhost before reading the flags.
	 */
	virtio_mb(vq->hw->weak_barriers);
	flags = vq->vq_packed.ring.device->desc_event_flags;

	return flags != RING_EVENT_FLAGS_DISABLE;
}
/*
 * virtqueue_kick_prepare*() or the virtio_wmb() should be called
 * before this function to be sure that all the data is visible to vhost.
 */
static inline void
virtqueue_notify(struct virtqueue *vq)
{
	VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
}
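
/*
 * Usage sketch (illustrative only): the usual split-ring submission
 * sequence, assuming the descriptors chained from "head_idx" have already
 * been filled in.
 *
 *	vq_update_avail_ring(vq, head_idx);
 *	vq_update_avail_idx(vq);
 *	if (unlikely(virtqueue_kick_prepare(vq)))
 *		virtqueue_notify(vq);
 */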
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
	uint16_t used_idx, nused; \
	used_idx = (vq)->vq_split.ring.used->idx; \
	nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
	if (vtpci_packed_queue((vq)->hw)) { \
		PMD_INIT_LOG(DEBUG, \
		"VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
		" cached_flags=0x%x; used_wrap_counter=%d", \
		(vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
		(vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
		(vq)->vq_packed.used_wrap_counter); \
		break; \
	} \
	PMD_INIT_LOG(DEBUG, \
	  "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
	  " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
	  " avail.flags=0x%x; used.flags=0x%x", \
	  (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
	  (vq)->vq_desc_head_idx, (vq)->vq_split.ring.avail->idx, \
	  (vq)->vq_used_cons_idx, (vq)->vq_split.ring.used->idx, \
	  (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
} while (0)
#else
#define VIRTQUEUE_DUMP(vq) do { } while (0)
#endif
/* avoid the write operation when the value is unchanged, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	typeof(var) var_ = (var);		\
	typeof(val) val_ = (val);		\
	if ((var_) != (val_))			\
		(var) = (val);			\
} while (0)
#define virtqueue_clear_net_hdr(hdr) do {		\
	typeof(hdr) hdr_ = (hdr);			\
	ASSIGN_UNLESS_EQUAL((hdr_)->csum_start, 0);	\
	ASSIGN_UNLESS_EQUAL((hdr_)->csum_offset, 0);	\
	ASSIGN_UNLESS_EQUAL((hdr_)->flags, 0);		\
	ASSIGN_UNLESS_EQUAL((hdr_)->gso_type, 0);	\
	ASSIGN_UNLESS_EQUAL((hdr_)->gso_size, 0);	\
	ASSIGN_UNLESS_EQUAL((hdr_)->hdr_len, 0);	\
} while (0)
static inline void
virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
			struct rte_mbuf *cookie,
			bool offload)
{
	if (offload) {
		if (cookie->ol_flags & PKT_TX_TCP_SEG)
			cookie->ol_flags |= PKT_TX_TCP_CKSUM;

		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
		case PKT_TX_UDP_CKSUM:
			hdr->csum_start = cookie->l2_len + cookie->l3_len;
			hdr->csum_offset = offsetof(struct rte_udp_hdr,
				dgram_cksum);
			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			break;

		case PKT_TX_TCP_CKSUM:
			hdr->csum_start = cookie->l2_len + cookie->l3_len;
			hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			break;

		default:
			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
			break;
		}

		/* TCP Segmentation Offload */
		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
				VIRTIO_NET_HDR_GSO_TCPV6 :
				VIRTIO_NET_HDR_GSO_TCPV4;
			hdr->gso_size = cookie->tso_segsz;
			hdr->hdr_len =
				cookie->l2_len +
				cookie->l3_len +
				cookie->l4_len;
		} else {
			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
		}
	}
}
static inline void
virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
			      uint16_t needed, int can_push, int in_order)
{
	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = txvq->vq;
	struct vring_packed_desc *start_dp, *head_dp;
	uint16_t idx, id, head_idx, head_flags;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;
	uint16_t prev;
	bool prepend_header = false;

	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;

	dxp = &vq->vq_descx[id];
	dxp->ndescs = needed;
	dxp->cookie = cookie;

	head_idx = vq->vq_avail_idx;
	idx = head_idx;
	prev = head_idx;
	start_dp = vq->vq_packed.ring.desc;

	head_dp = &vq->vq_packed.ring.desc[idx];
	head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
	head_flags |= vq->vq_packed.cached_flags;

	if (can_push) {
		/* prepend cannot fail, checked by caller */
		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
					      -head_size);
		prepend_header = true;

		/* if offload disabled, it is not zeroed below, do it now */
		if (!vq->hw->has_tx_offload)
			virtqueue_clear_net_hdr(hdr);
	} else {
		/* setup first tx ring slot to point to header
		 * stored in reserved region.
		 */
		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
		start_dp[idx].len = vq->hw->vtnet_hdr_size;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
		idx++;
		if (idx >= vq->vq_nentries) {
			idx -= vq->vq_nentries;
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
		}
	}

	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);

	do {
		uint16_t flags;

		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
		start_dp[idx].len = cookie->data_len;
		if (prepend_header) {
			start_dp[idx].addr -= head_size;
			start_dp[idx].len += head_size;
			prepend_header = false;
		}

		if (likely(idx != head_idx)) {
			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
			flags |= vq->vq_packed.cached_flags;
			start_dp[idx].flags = flags;
		}
		prev = idx;
		idx++;
		if (idx >= vq->vq_nentries) {
			idx -= vq->vq_nentries;
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
		}
	} while ((cookie = cookie->next) != NULL);

	start_dp[prev].id = id;

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
	vq->vq_avail_idx = idx;

	if (!in_order) {
		vq->vq_desc_head_idx = dxp->next;
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
	}

	virtqueue_store_flags_packed(head_dp, head_flags,
				     vq->hw->weak_barriers);
}
static inline void
vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
{
	struct vq_desc_extra *dxp;

	dxp = &vq->vq_descx[id];
	vq->vq_free_cnt += dxp->ndescs;

	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
		vq->vq_desc_head_idx = id;
	else
		vq->vq_descx[vq->vq_desc_tail_idx].next = id;

	vq->vq_desc_tail_idx = id;
	dxp->next = VQ_RING_DESC_CHAIN_END;
}
static inline void
virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
{
	uint16_t used_idx, id, curr_id, free_cnt = 0;
	uint16_t size = vq->vq_nentries;
	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
	struct vq_desc_extra *dxp;

	used_idx = vq->vq_used_cons_idx;
	/* desc_is_used has a load-acquire or rte_cio_rmb inside
	 * and waits for a used descriptor in the virtqueue.
	 */
	while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
		id = desc[used_idx].id;
		do {
			curr_id = used_idx;
			dxp = &vq->vq_descx[used_idx];
			used_idx += dxp->ndescs;
			free_cnt += dxp->ndescs;
			num -= dxp->ndescs;
			if (used_idx >= size) {
				used_idx -= size;
				vq->vq_packed.used_wrap_counter ^= 1;
			}
			if (dxp->cookie != NULL) {
				rte_pktmbuf_free(dxp->cookie);
				dxp->cookie = NULL;
			}
		} while (curr_id != id);
	}
	vq->vq_used_cons_idx = used_idx;
	vq->vq_free_cnt += free_cnt;
}
static inline void
virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
{
	uint16_t used_idx, id;
	uint16_t size = vq->vq_nentries;
	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
	struct vq_desc_extra *dxp;

	used_idx = vq->vq_used_cons_idx;
	/* desc_is_used has a load-acquire or rte_cio_rmb inside
	 * and waits for a used descriptor in the virtqueue.
	 */
	while (num-- && desc_is_used(&desc[used_idx], vq)) {
		id = desc[used_idx].id;
		dxp = &vq->vq_descx[id];
		vq->vq_used_cons_idx += dxp->ndescs;
		if (vq->vq_used_cons_idx >= size) {
			vq->vq_used_cons_idx -= size;
			vq->vq_packed.used_wrap_counter ^= 1;
		}
		vq_ring_free_id_packed(vq, id);
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
		used_idx = vq->vq_used_cons_idx;
	}
}
/* Cleanup from completed transmits. */
static inline void
virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
{
	if (in_order)
		virtio_xmit_cleanup_inorder_packed(vq, num);
	else
		virtio_xmit_cleanup_normal_packed(vq, num);
}
static inline void
virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
{
	uint16_t i, used_idx, desc_idx;
	for (i = 0; i < num; i++) {
		struct vring_used_elem *uep;
		struct vq_desc_extra *dxp;

		used_idx = (uint16_t)(vq->vq_used_cons_idx &
				(vq->vq_nentries - 1));
		uep = &vq->vq_split.ring.used->ring[used_idx];

		desc_idx = (uint16_t)uep->id;
		dxp = &vq->vq_descx[desc_idx];
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);

		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}
}
/* Cleanup from completed inorder transmits. */
static __rte_always_inline void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
	uint16_t i, idx = vq->vq_used_cons_idx;
	int16_t free_cnt = 0;
	struct vq_desc_extra *dxp = NULL;

	if (unlikely(num == 0))
		return;

	for (i = 0; i < num; i++) {
		dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
		free_cnt += dxp->ndescs;
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}

	vq->vq_free_cnt += free_cnt;
	vq->vq_used_cons_idx = idx;
}
#endif /* _VIRTQUEUE_H_ */