X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtqueue.h;h=105a9c00c504cfb7f9e91be3a578482df9e25185;hb=c47d6e83334e656f85e4bb6881cf63da38276b0a;hp=d8ae5cdec40242bd8e271bc284988b87e29348bc;hpb=892dc798fa9c24e6172b8bcecc9586f2f9a7a49e;p=dpdk.git diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h index d8ae5cdec4..105a9c00c5 100644 --- a/drivers/net/virtio/virtqueue.h +++ b/drivers/net/virtio/virtqueue.h @@ -10,6 +10,7 @@ #include #include #include +#include #include "virtio_pci.h" #include "virtio_ring.h" @@ -18,17 +19,93 @@ struct rte_mbuf; +#define DEFAULT_TX_FREE_THRESH 32 +#define DEFAULT_RX_FREE_THRESH 32 + +#define VIRTIO_MBUF_BURST_SZ 64 /* - * Per virtio_config.h in Linux. + * Per virtio_ring.h in Linux. * For virtio_pci on SMP, we don't need to order with respect to MMIO * accesses through relaxed memory I/O windows, so smp_mb() et al are * sufficient. * + * For using virtio to talk to real devices (eg. vDPA) we do need real + * barriers. + */ +static inline void +virtio_mb(uint8_t weak_barriers) +{ + if (weak_barriers) + rte_smp_mb(); + else + rte_mb(); +} + +static inline void +virtio_rmb(uint8_t weak_barriers) +{ + if (weak_barriers) + rte_smp_rmb(); + else + rte_cio_rmb(); +} + +static inline void +virtio_wmb(uint8_t weak_barriers) +{ + if (weak_barriers) + rte_smp_wmb(); + else + rte_cio_wmb(); +} + +static inline uint16_t +virtqueue_fetch_flags_packed(struct vring_packed_desc *dp, + uint8_t weak_barriers) +{ + uint16_t flags; + + if (weak_barriers) { +/* x86 prefers to using rte_smp_rmb over __atomic_load_n as it reports + * a better perf(~1.5%), which comes from the saved branch by the compiler. + * The if and else branch are identical with the smp and cio barriers both + * defined as compiler barriers on x86. */ -#define virtio_mb() rte_smp_mb() -#define virtio_rmb() rte_smp_rmb() -#define virtio_wmb() rte_smp_wmb() +#ifdef RTE_ARCH_X86_64 + flags = dp->flags; + rte_smp_rmb(); +#else + flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE); +#endif + } else { + flags = dp->flags; + rte_cio_rmb(); + } + return flags; +} + +static inline void +virtqueue_store_flags_packed(struct vring_packed_desc *dp, + uint16_t flags, uint8_t weak_barriers) +{ + if (weak_barriers) { +/* x86 prefers to using rte_smp_wmb over __atomic_store_n as it reports + * a better perf(~1.5%), which comes from the saved branch by the compiler. + * The if and else branch are identical with the smp and cio barriers both + * defined as compiler barriers on x86. + */ +#ifdef RTE_ARCH_X86_64 + rte_smp_wmb(); + dp->flags = flags; +#else + __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE); +#endif + } else { + rte_cio_wmb(); + dp->flags = flags; + } +} #ifdef RTE_PMD_PACKET_PREFETCH #define rte_packet_prefetch(p) rte_prefetch1(p) #else @@ -109,8 +186,8 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 }; */ struct virtio_net_ctrl_mac { uint32_t entries; - uint8_t macs[][ETHER_ADDR_LEN]; -} __attribute__((__packed__)); + uint8_t macs[][RTE_ETHER_ADDR_LEN]; +} __rte_packed; #define VIRTIO_NET_CTRL_MAC 1 #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0 @@ -143,7 +220,7 @@ struct virtio_net_ctrl_mac { struct virtio_net_ctrl_hdr { uint8_t class; uint8_t cmd; -} __attribute__((packed)); +} __rte_packed; typedef uint8_t virtio_net_ctrl_ack; @@ -166,17 +243,22 @@ struct vq_desc_extra { struct virtqueue { struct virtio_hw *hw; /**< virtio_hw structure pointer. 
*/ - struct vring vq_ring; /**< vring keeping desc, used and avail */ - struct vring_packed ring_packed; /**< vring keeping descs */ - bool avail_wrap_counter; - bool used_wrap_counter; - uint16_t event_flags_shadow; - uint16_t avail_used_flags; - /** - * Last consumed descriptor in the used table, - * trails vq_ring.used->idx. - */ - uint16_t vq_used_cons_idx; + union { + struct { + /**< vring keeping desc, used and avail */ + struct vring ring; + } vq_split; + + struct { + /**< vring keeping descs and events */ + struct vring_packed ring; + bool used_wrap_counter; + uint16_t cached_flags; /**< cached flags for descs */ + uint16_t event_flags_shadow; + } vq_packed; + }; + + uint16_t vq_used_cons_idx; /**< last consumed descriptor */ uint16_t vq_nentries; /**< vring desc numbers */ uint16_t vq_free_cnt; /**< num of desc available */ uint16_t vq_avail_idx; /**< sync until needed */ @@ -247,12 +329,8 @@ struct virtio_net_hdr_mrg_rxbuf { #define VIRTIO_MAX_TX_INDIRECT 8 struct virtio_tx_region { struct virtio_net_hdr_mrg_rxbuf tx_hdr; - union { - struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT] - __attribute__((__aligned__(16))); - struct vring_packed_desc tx_indir_pq[VIRTIO_MAX_TX_INDIRECT] - __attribute__((__aligned__(16))); - }; + struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT] + __rte_aligned(16); }; static inline int @@ -260,23 +338,22 @@ desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq) { uint16_t used, avail, flags; - flags = desc->flags; - used = !!(flags & VRING_DESC_F_USED(1)); - avail = !!(flags & VRING_DESC_F_AVAIL(1)); + flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers); + used = !!(flags & VRING_PACKED_DESC_F_USED); + avail = !!(flags & VRING_PACKED_DESC_F_AVAIL); - return avail == used && used == vq->used_wrap_counter; + return avail == used && used == vq->vq_packed.used_wrap_counter; } - static inline void vring_desc_init_packed(struct virtqueue *vq, int n) { int i; for (i = 0; i < n - 1; i++) { - vq->ring_packed.desc_packed[i].id = i; + vq->vq_packed.ring.desc[i].id = i; vq->vq_descx[i].next = i + 1; } - vq->ring_packed.desc_packed[i].id = i; + vq->vq_packed.ring.desc[i].id = i; vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END; } @@ -292,16 +369,26 @@ vring_desc_init_split(struct vring_desc *dp, uint16_t n) } /** - * Tell the backend not to interrupt us. + * Tell the backend not to interrupt us. Implementation for packed virtqueues. */ static inline void virtqueue_disable_intr_packed(struct virtqueue *vq) { - uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags; - - *event_flags = RING_EVENT_FLAGS_DISABLE; + if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) { + vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE; + vq->vq_packed.ring.driver->desc_event_flags = + vq->vq_packed.event_flags_shadow; + } } +/** + * Tell the backend not to interrupt us. Implementation for split virtqueues. + */ +static inline void +virtqueue_disable_intr_split(struct virtqueue *vq) +{ + vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; +} /** * Tell the backend not to interrupt us. 
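/*
 * Editor's note (not part of the patch): a minimal sketch of how the
 * packed-ring publication handshake added earlier in this file is meant
 * to be used.  It relies only on fields and helpers introduced by this
 * patch (vq_packed.ring.desc, vq_packed.cached_flags, hw->weak_barriers,
 * virtqueue_store_flags_packed()); the function name and its addr/len
 * parameters are illustrative, not taken from the driver.
 */
static inline void
example_publish_desc_packed(struct virtqueue *vq, uint16_t idx,
			    uint64_t addr, uint32_t len)
{
	struct vring_packed_desc *desc = &vq->vq_packed.ring.desc[idx];

	/* Fill the descriptor body before it is made visible. */
	desc->addr = addr;
	desc->len  = len;
	desc->id   = idx;

	/*
	 * cached_flags carries the AVAIL/USED bits for the current wrap of
	 * the ring; the release store (or rte_smp_wmb() on x86) inside
	 * virtqueue_store_flags_packed() orders the writes above before the
	 * flags update, so the device never observes a half-written
	 * descriptor.
	 */
	virtqueue_store_flags_packed(desc, vq->vq_packed.cached_flags,
				     vq->hw->weak_barriers);
}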
@@ -312,7 +399,7 @@ virtqueue_disable_intr(struct virtqueue *vq) if (vtpci_packed_queue(vq->hw)) virtqueue_disable_intr_packed(vq); else - vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; + virtqueue_disable_intr_split(vq); } /** @@ -321,13 +408,10 @@ virtqueue_disable_intr(struct virtqueue *vq) static inline void virtqueue_enable_intr_packed(struct virtqueue *vq) { - uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags; - - - if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) { - virtio_wmb(); - vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE; - *event_flags = vq->event_flags_shadow; + if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) { + vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE; + vq->vq_packed.ring.driver->desc_event_flags = + vq->vq_packed.event_flags_shadow; } } @@ -337,7 +421,7 @@ virtqueue_enable_intr_packed(struct virtqueue *vq) static inline void virtqueue_enable_intr_split(struct virtqueue *vq) { - vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT); + vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT); } /** @@ -364,6 +448,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq); /* Flush the elements in the used ring. */ void virtqueue_rxvq_flush(struct virtqueue *vq); +int virtqueue_rxvq_reset_packed(struct virtqueue *vq); + +int virtqueue_txvq_reset_packed(struct virtqueue *vq); + static inline int virtqueue_full(const struct virtqueue *vq) { @@ -381,7 +469,33 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx) return VTNET_TQ; } -#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx)) +/* virtqueue_nused has load-acquire or rte_cio_rmb insed */ +static inline uint16_t +virtqueue_nused(const struct virtqueue *vq) +{ + uint16_t idx; + + if (vq->hw->weak_barriers) { + /** + * x86 prefers to using rte_smp_rmb over __atomic_load_n as it + * reports a slightly better perf, which comes from the saved + * branch by the compiler. + * The if and else branches are identical with the smp and cio + * barriers both defined as compiler barriers on x86. + */ +#ifdef RTE_ARCH_X86_64 + idx = vq->vq_split.ring.used->idx; + rte_smp_rmb(); +#else + idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, + __ATOMIC_ACQUIRE); +#endif + } else { + idx = vq->vq_split.ring.used->idx; + rte_cio_rmb(); + } + return idx - vq->vq_used_cons_idx; +} void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx); void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx); @@ -391,8 +505,24 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, static inline void vq_update_avail_idx(struct virtqueue *vq) { - virtio_wmb(); - vq->vq_ring.avail->idx = vq->vq_avail_idx; + if (vq->hw->weak_barriers) { + /* x86 prefers to using rte_smp_wmb over __atomic_store_n as + * it reports a slightly better perf, which comes from the + * saved branch by the compiler. + * The if and else branches are identical with the smp and + * cio barriers both defined as compiler barriers on x86. + */ +#ifdef RTE_ARCH_X86_64 + rte_smp_wmb(); + vq->vq_split.ring.avail->idx = vq->vq_avail_idx; +#else + __atomic_store_n(&vq->vq_split.ring.avail->idx, + vq->vq_avail_idx, __ATOMIC_RELEASE); +#endif + } else { + rte_cio_wmb(); + vq->vq_split.ring.avail->idx = vq->vq_avail_idx; + } } static inline void @@ -407,15 +537,20 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx) * descriptor. 
*/ avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1)); - if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx)) - vq->vq_ring.avail->ring[avail_idx] = desc_idx; + if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx)) + vq->vq_split.ring.avail->ring[avail_idx] = desc_idx; vq->vq_avail_idx++; } static inline int virtqueue_kick_prepare(struct virtqueue *vq) { - return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY); + /* + * Ensure updated avail->idx is visible to vhost before reading + * the used->flags. + */ + virtio_mb(vq->hw->weak_barriers); + return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY); } static inline int @@ -423,48 +558,350 @@ virtqueue_kick_prepare_packed(struct virtqueue *vq) { uint16_t flags; - virtio_mb(); - flags = vq->ring_packed.device_event->desc_event_flags; + /* + * Ensure updated data is visible to vhost before reading the flags. + */ + virtio_mb(vq->hw->weak_barriers); + flags = vq->vq_packed.ring.device->desc_event_flags; return flags != RING_EVENT_FLAGS_DISABLE; } +/* + * virtqueue_kick_prepare*() or the virtio_wmb() should be called + * before this function to be sure that all the data is visible to vhost. + */ static inline void virtqueue_notify(struct virtqueue *vq) { - /* - * Ensure updated avail->idx is visible to host. - * For virtio on IA, the notificaiton is through io port operation - * which is a serialization instruction itself. - */ VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq); } #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP #define VIRTQUEUE_DUMP(vq) do { \ uint16_t used_idx, nused; \ - used_idx = (vq)->vq_ring.used->idx; \ + used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \ + __ATOMIC_RELAXED); \ nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \ if (vtpci_packed_queue((vq)->hw)) { \ PMD_INIT_LOG(DEBUG, \ "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \ - "VQ: - avail_wrap_counter=%d; used_wrap_counter=%d", \ + " cached_flags=0x%x; used_wrap_counter=%d", \ (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \ - (vq)->vq_avail_idx, (vq)->avail_wrap_counter, \ - (vq)->used_wrap_counter); \ + (vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \ + (vq)->vq_packed.used_wrap_counter); \ break; \ } \ PMD_INIT_LOG(DEBUG, \ "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \ " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \ " avail.flags=0x%x; used.flags=0x%x", \ - (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \ - (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \ - (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \ - (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \ + (vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \ + (vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \ + __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \ + (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \ } while (0) #else #define VIRTQUEUE_DUMP(vq) do { } while (0) #endif +/* avoid write operation when necessary, to lessen cache issues */ +#define ASSIGN_UNLESS_EQUAL(var, val) do { \ + typeof(var) var_ = (var); \ + typeof(val) val_ = (val); \ + if ((var_) != (val_)) \ + (var_) = (val_); \ +} while (0) + +#define virtqueue_clear_net_hdr(hdr) do { \ + typeof(hdr) hdr_ = (hdr); \ + ASSIGN_UNLESS_EQUAL((hdr_)->csum_start, 0); \ + ASSIGN_UNLESS_EQUAL((hdr_)->csum_offset, 0); \ + ASSIGN_UNLESS_EQUAL((hdr_)->flags, 0); \ + ASSIGN_UNLESS_EQUAL((hdr_)->gso_type, 0); \ + ASSIGN_UNLESS_EQUAL((hdr_)->gso_size, 0); \ + 
ASSIGN_UNLESS_EQUAL((hdr_)->hdr_len, 0); \ +} while (0) + +static inline void +virtqueue_xmit_offload(struct virtio_net_hdr *hdr, + struct rte_mbuf *cookie, + bool offload) +{ + if (offload) { + if (cookie->ol_flags & PKT_TX_TCP_SEG) + cookie->ol_flags |= PKT_TX_TCP_CKSUM; + + switch (cookie->ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + hdr->csum_start = cookie->l2_len + cookie->l3_len; + hdr->csum_offset = offsetof(struct rte_udp_hdr, + dgram_cksum); + hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + break; + + case PKT_TX_TCP_CKSUM: + hdr->csum_start = cookie->l2_len + cookie->l3_len; + hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum); + hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + break; + + default: + ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0); + ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0); + ASSIGN_UNLESS_EQUAL(hdr->flags, 0); + break; + } + + /* TCP Segmentation Offload */ + if (cookie->ol_flags & PKT_TX_TCP_SEG) { + hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ? + VIRTIO_NET_HDR_GSO_TCPV6 : + VIRTIO_NET_HDR_GSO_TCPV4; + hdr->gso_size = cookie->tso_segsz; + hdr->hdr_len = + cookie->l2_len + + cookie->l3_len + + cookie->l4_len; + } else { + ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0); + ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0); + ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0); + } + } +} + +static inline void +virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie, + uint16_t needed, int can_push, int in_order) +{ + struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr; + struct vq_desc_extra *dxp; + struct virtqueue *vq = txvq->vq; + struct vring_packed_desc *start_dp, *head_dp; + uint16_t idx, id, head_idx, head_flags; + int16_t head_size = vq->hw->vtnet_hdr_size; + struct virtio_net_hdr *hdr; + uint16_t prev; + bool prepend_header = false; + + id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx; + + dxp = &vq->vq_descx[id]; + dxp->ndescs = needed; + dxp->cookie = cookie; + + head_idx = vq->vq_avail_idx; + idx = head_idx; + prev = head_idx; + start_dp = vq->vq_packed.ring.desc; + + head_dp = &vq->vq_packed.ring.desc[idx]; + head_flags = cookie->next ? VRING_DESC_F_NEXT : 0; + head_flags |= vq->vq_packed.cached_flags; + + if (can_push) { + /* prepend cannot fail, checked by caller */ + hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *, + -head_size); + prepend_header = true; + + /* if offload disabled, it is not zeroed below, do it now */ + if (!vq->hw->has_tx_offload) + virtqueue_clear_net_hdr(hdr); + } else { + /* setup first tx ring slot to point to header + * stored in reserved region. + */ + start_dp[idx].addr = txvq->virtio_net_hdr_mem + + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr); + start_dp[idx].len = vq->hw->vtnet_hdr_size; + hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr; + idx++; + if (idx >= vq->vq_nentries) { + idx -= vq->vq_nentries; + vq->vq_packed.cached_flags ^= + VRING_PACKED_DESC_F_AVAIL_USED; + } + } + + virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload); + + do { + uint16_t flags; + + start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq); + start_dp[idx].len = cookie->data_len; + if (prepend_header) { + start_dp[idx].addr -= head_size; + start_dp[idx].len += head_size; + prepend_header = false; + } + + if (likely(idx != head_idx)) { + flags = cookie->next ? 
VRING_DESC_F_NEXT : 0; + flags |= vq->vq_packed.cached_flags; + start_dp[idx].flags = flags; + } + prev = idx; + idx++; + if (idx >= vq->vq_nentries) { + idx -= vq->vq_nentries; + vq->vq_packed.cached_flags ^= + VRING_PACKED_DESC_F_AVAIL_USED; + } + } while ((cookie = cookie->next) != NULL); + + start_dp[prev].id = id; + + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); + vq->vq_avail_idx = idx; + + if (!in_order) { + vq->vq_desc_head_idx = dxp->next; + if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) + vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END; + } + + virtqueue_store_flags_packed(head_dp, head_flags, + vq->hw->weak_barriers); +} + +static void +vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id) +{ + struct vq_desc_extra *dxp; + + dxp = &vq->vq_descx[id]; + vq->vq_free_cnt += dxp->ndescs; + + if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) + vq->vq_desc_head_idx = id; + else + vq->vq_descx[vq->vq_desc_tail_idx].next = id; + + vq->vq_desc_tail_idx = id; + dxp->next = VQ_RING_DESC_CHAIN_END; +} + +static void +virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num) +{ + uint16_t used_idx, id, curr_id, free_cnt = 0; + uint16_t size = vq->vq_nentries; + struct vring_packed_desc *desc = vq->vq_packed.ring.desc; + struct vq_desc_extra *dxp; + + used_idx = vq->vq_used_cons_idx; + /* desc_is_used has a load-acquire or rte_cio_rmb inside + * and wait for used desc in virtqueue. + */ + while (num > 0 && desc_is_used(&desc[used_idx], vq)) { + id = desc[used_idx].id; + do { + curr_id = used_idx; + dxp = &vq->vq_descx[used_idx]; + used_idx += dxp->ndescs; + free_cnt += dxp->ndescs; + num -= dxp->ndescs; + if (used_idx >= size) { + used_idx -= size; + vq->vq_packed.used_wrap_counter ^= 1; + } + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + } while (curr_id != id); + } + vq->vq_used_cons_idx = used_idx; + vq->vq_free_cnt += free_cnt; +} + +static void +virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num) +{ + uint16_t used_idx, id; + uint16_t size = vq->vq_nentries; + struct vring_packed_desc *desc = vq->vq_packed.ring.desc; + struct vq_desc_extra *dxp; + + used_idx = vq->vq_used_cons_idx; + /* desc_is_used has a load-acquire or rte_cio_rmb inside + * and wait for used desc in virtqueue. + */ + while (num-- && desc_is_used(&desc[used_idx], vq)) { + id = desc[used_idx].id; + dxp = &vq->vq_descx[id]; + vq->vq_used_cons_idx += dxp->ndescs; + if (vq->vq_used_cons_idx >= size) { + vq->vq_used_cons_idx -= size; + vq->vq_packed.used_wrap_counter ^= 1; + } + vq_ring_free_id_packed(vq, id); + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + used_idx = vq->vq_used_cons_idx; + } +} + +/* Cleanup from completed transmits. 
*/ +static inline void +virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order) +{ + if (in_order) + virtio_xmit_cleanup_inorder_packed(vq, num); + else + virtio_xmit_cleanup_normal_packed(vq, num); +} + +static inline void +virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num) +{ + uint16_t i, used_idx, desc_idx; + for (i = 0; i < num; i++) { + struct vring_used_elem *uep; + struct vq_desc_extra *dxp; + + used_idx = (uint16_t)(vq->vq_used_cons_idx & + (vq->vq_nentries - 1)); + uep = &vq->vq_split.ring.used->ring[used_idx]; + + desc_idx = (uint16_t)uep->id; + dxp = &vq->vq_descx[desc_idx]; + vq->vq_used_cons_idx++; + vq_ring_free_chain(vq, desc_idx); + + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + } +} + +/* Cleanup from completed inorder transmits. */ +static __rte_always_inline void +virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num) +{ + uint16_t i, idx = vq->vq_used_cons_idx; + int16_t free_cnt = 0; + struct vq_desc_extra *dxp = NULL; + + if (unlikely(num == 0)) + return; + + for (i = 0; i < num; i++) { + dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)]; + free_cnt += dxp->ndescs; + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + } + + vq->vq_free_cnt += free_cnt; + vq->vq_used_cons_idx = idx; +} #endif /* _VIRTQUEUE_H_ */
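As a closing illustration (not part of the patch), here is a minimal sketch of how the new virtqueue_nused() helper, which replaces the old VIRTQUEUE_NUSED() macro, is expected to pair with virtio_xmit_cleanup() on the split ring's TX path. The function name and the use of DEFAULT_TX_FREE_THRESH as the reclaim threshold are illustrative assumptions; the real burst routines in virtio_rxtx.c typically compare against the queue's configured free threshold instead.

static inline void
example_tx_reclaim_split(struct virtqueue *vq)
{
	/*
	 * virtqueue_nused() performs the load-acquire (or rte_smp_rmb() on
	 * x86) of used->idx, so the used-ring entries walked by
	 * virtio_xmit_cleanup() below are guaranteed to be up to date.
	 */
	uint16_t nb_used = virtqueue_nused(vq);

	/* Reclaim completed descriptors only once enough have accumulated. */
	if (nb_used >= DEFAULT_TX_FREE_THRESH)
		virtio_xmit_cleanup(vq, nb_used);
}

The point of splitting the acquire barrier into virtqueue_nused() rather than leaving it to callers is that every consumer of used->idx gets the ordering for free, whether the queue uses weak (SMP) or strong (vDPA/real device) barriers.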