struct vq_desc_extra *dxp;
uint16_t desc_idx_last = desc_idx;
- dp = &vq->vq_ring.desc[desc_idx];
+ dp = &vq->vq_split.ring.desc[desc_idx];
dxp = &vq->vq_descx[desc_idx];
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
while (dp->flags & VRING_DESC_F_NEXT) {
desc_idx_last = dp->next;
- dp = &vq->vq_ring.desc[dp->next];
+ dp = &vq->vq_split.ring.desc[dp->next];
}
}
dxp->ndescs = 0;
if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
vq->vq_desc_head_idx = desc_idx;
} else {
- dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+ dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
dp_tail->next = desc_idx;
}
dxp->next = VQ_RING_DESC_CHAIN_END;
}
+void
+virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+ uint32_t s = mbuf->pkt_len;
+ struct rte_ether_addr *ea;
+
+ stats->bytes += s;
+
+ if (s == 64) {
+ stats->size_bins[1]++;
+ } else if (s > 64 && s < 1024) {
+ uint32_t bin;
+
+ /* count zeros, and offset into correct bin */
+ bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
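+ /* e.g. s = 128: 32 - __builtin_clz(128) - 5 = 32 - 24 - 5 = 3,
+ * i.e. the 128..255 bucket; 65..127 maps to bin 2, 512..1023 to bin 5.
+ */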
+ stats->size_bins[bin]++;
+ } else {
+ if (s < 64)
+ stats->size_bins[0]++;
+ else if (s < 1519)
+ stats->size_bins[6]++;
+ else
+ stats->size_bins[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
+ stats->broadcast++;
+ else
+ stats->multicast++;
+ }
+}
+
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+ VIRTIO_DUMP_PACKET(m, m->data_len);
+
+ virtio_update_packet_stats(&rxvq->stats, m);
+}
+
static uint16_t
virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
struct rte_mbuf **rx_pkts,
struct vring_packed_desc *desc;
uint16_t i;
- desc = vq->ring_packed.desc_packed;
+ desc = vq->vq_packed.ring.desc;
for (i = 0; i < num; i++) {
used_idx = vq->vq_used_cons_idx;
+ /* desc_is_used has a load-acquire or rte_cio_rmb inside
+ * and waits for the used desc in the virtqueue.
+ */
if (!desc_is_used(&desc[used_idx], vq))
return i;
len[i] = desc[used_idx].len;
vq->vq_used_cons_idx++;
if (vq->vq_used_cons_idx >= vq->vq_nentries) {
vq->vq_used_cons_idx -= vq->vq_nentries;
- vq->used_wrap_counter ^= 1;
+ vq->vq_packed.used_wrap_counter ^= 1;
}
}
/* Caller does the check */
for (i = 0; i < num ; i++) {
used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- uep = &vq->vq_ring.used->ring[used_idx];
+ uep = &vq->vq_split.ring.used->ring[used_idx];
desc_idx = (uint16_t) uep->id;
len[i] = uep->len;
cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
for (i = 0; i < num; i++) {
used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
/* Desc idx same as used idx */
- uep = &vq->vq_ring.used->ring[used_idx];
+ uep = &vq->vq_split.ring.used->ring[used_idx];
len[i] = uep->len;
cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
#define DEFAULT_TX_FREE_THRESH 32
#endif
-/* Cleanup from completed transmits. */
static void
-virtio_xmit_cleanup_packed(struct virtqueue *vq, int num)
+virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
+{
+ uint16_t used_idx, id, curr_id, free_cnt = 0;
+ uint16_t size = vq->vq_nentries;
+ struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
+ struct vq_desc_extra *dxp;
+
+ used_idx = vq->vq_used_cons_idx;
+ /* desc_is_used has a load-acquire or rte_cio_rmb inside
+ * and waits for the used desc in the virtqueue.
+ */
+ while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
+ id = desc[used_idx].id;
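+ /* in-order completion: the device reports a single used id per
+ * batch; every chain up to and including `id` is complete, so free
+ * their extra-data entries in ring order until we reach it.
+ */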
+ do {
+ curr_id = used_idx;
+ dxp = &vq->vq_descx[used_idx];
+ used_idx += dxp->ndescs;
+ free_cnt += dxp->ndescs;
+ num -= dxp->ndescs;
+ if (used_idx >= size) {
+ used_idx -= size;
+ vq->vq_packed.used_wrap_counter ^= 1;
+ }
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ } while (curr_id != id);
+ }
+ vq->vq_used_cons_idx = used_idx;
+ vq->vq_free_cnt += free_cnt;
+}
+
+static void
+virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
{
uint16_t used_idx, id;
uint16_t size = vq->vq_nentries;
- struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+ struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
struct vq_desc_extra *dxp;
used_idx = vq->vq_used_cons_idx;
+ /* desc_is_used has a load-acquire or rte_cio_rmb inside
+ * and waits for the used desc in the virtqueue.
+ */
while (num-- && desc_is_used(&desc[used_idx], vq)) {
- virtio_rmb(vq->hw->weak_barriers);
id = desc[used_idx].id;
dxp = &vq->vq_descx[id];
vq->vq_used_cons_idx += dxp->ndescs;
if (vq->vq_used_cons_idx >= size) {
vq->vq_used_cons_idx -= size;
- vq->used_wrap_counter ^= 1;
+ vq->vq_packed.used_wrap_counter ^= 1;
}
vq_ring_free_id_packed(vq, id);
if (dxp->cookie != NULL) {
}
}
+/* Cleanup from completed transmits. */
+static inline void
+virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
+{
+ if (in_order)
+ virtio_xmit_cleanup_inorder_packed(vq, num);
+ else
+ virtio_xmit_cleanup_normal_packed(vq, num);
+}
+
static void
virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
{
struct vq_desc_extra *dxp;
used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- uep = &vq->vq_ring.used->ring[used_idx];
+ uep = &vq->vq_split.ring.used->ring[used_idx];
desc_idx = (uint16_t) uep->id;
dxp = &vq->vq_descx[desc_idx];
}
/* Cleanup from completed inorder transmits. */
-static void
+static __rte_always_inline void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
- uint16_t i, used_idx, desc_idx = 0, last_idx;
+ uint16_t i, idx = vq->vq_used_cons_idx;
int16_t free_cnt = 0;
struct vq_desc_extra *dxp = NULL;
return;
for (i = 0; i < num; i++) {
- struct vring_used_elem *uep;
-
- used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
- uep = &vq->vq_ring.used->ring[used_idx];
- desc_idx = (uint16_t)uep->id;
-
- dxp = &vq->vq_descx[desc_idx];
- vq->vq_used_cons_idx++;
-
+ dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
+ free_cnt += dxp->ndescs;
if (dxp->cookie != NULL) {
rte_pktmbuf_free(dxp->cookie);
dxp->cookie = NULL;
}
}
- last_idx = desc_idx + dxp->ndescs - 1;
- free_cnt = last_idx - vq->vq_desc_tail_idx;
- if (free_cnt <= 0)
- free_cnt += vq->vq_nentries;
-
- vq_ring_free_inorder(vq, last_idx, free_cnt);
+ vq->vq_free_cnt += free_cnt;
+ vq->vq_used_cons_idx = idx;
}
static inline int
return -EMSGSIZE;
head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
- start_dp = vq->vq_ring.desc;
+ start_dp = vq->vq_split.ring.desc;
while (i < num) {
idx = head_idx & (vq->vq_nentries - 1);
{
struct vq_desc_extra *dxp;
struct virtio_hw *hw = vq->hw;
- struct vring_desc *start_dp = vq->vq_ring.desc;
+ struct vring_desc *start_dp = vq->vq_split.ring.desc;
uint16_t idx, i;
if (unlikely(vq->vq_free_cnt == 0))
virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
struct rte_mbuf **cookie, uint16_t num)
{
- struct vring_packed_desc *start_dp = vq->ring_packed.desc_packed;
- uint16_t flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
+ struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
+ uint16_t flags = vq->vq_packed.cached_flags;
struct virtio_hw *hw = vq->hw;
struct vq_desc_extra *dxp;
uint16_t idx;
vq->vq_desc_head_idx = dxp->next;
if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
- virtio_wmb(hw->weak_barriers);
- start_dp[idx].flags = flags;
+
+ virtqueue_store_flags_packed(&start_dp[idx], flags,
+ hw->weak_barriers);
+
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
- vq->avail_used_flags =
- VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
- flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
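+ /* wrapped around: flip the cached AVAIL/USED bits so descriptors
+ * written in the new wrap carry the inverted flags.
+ */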
+ vq->vq_packed.cached_flags ^=
+ VRING_PACKED_DESC_F_AVAIL_USED;
+ flags = vq->vq_packed.cached_flags;
}
}
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
/* common case: header is not fragmented */
if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
m->l4_len)) {
- struct ipv4_hdr *iph;
- struct ipv6_hdr *ip6h;
- struct tcp_hdr *th;
+ struct rte_ipv4_hdr *iph;
+ struct rte_ipv6_hdr *ip6h;
+ struct rte_tcp_hdr *th;
uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
uint32_t tmp;
- iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ iph = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv4_hdr *, m->l2_len);
th = RTE_PTR_ADD(iph, m->l3_len);
if ((iph->version_ihl >> 4) == 4) {
iph->hdr_checksum = 0;
ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
m->l3_len);
} else {
- ip6h = (struct ipv6_hdr *)iph;
+ ip6h = (struct rte_ipv6_hdr *)iph;
ip_paylen = ip6h->payload_len;
}
(var) = (val); \
} while (0)
+#define virtqueue_clear_net_hdr(_hdr) do { \
+ ASSIGN_UNLESS_EQUAL((_hdr)->csum_start, 0); \
+ ASSIGN_UNLESS_EQUAL((_hdr)->csum_offset, 0); \
+ ASSIGN_UNLESS_EQUAL((_hdr)->flags, 0); \
+ ASSIGN_UNLESS_EQUAL((_hdr)->gso_type, 0); \
+ ASSIGN_UNLESS_EQUAL((_hdr)->gso_size, 0); \
+ ASSIGN_UNLESS_EQUAL((_hdr)->hdr_len, 0); \
+} while (0)
+
static inline void
virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
struct rte_mbuf *cookie,
switch (cookie->ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct udp_hdr,
+ hdr->csum_offset = offsetof(struct rte_udp_hdr,
dgram_cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
case PKT_TX_TCP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+ hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
/* TCP Segmentation Offload */
if (cookie->ol_flags & PKT_TX_TCP_SEG) {
- virtio_tso_fix_cksum(cookie);
hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
VIRTIO_NET_HDR_GSO_TCPV6 :
VIRTIO_NET_HDR_GSO_TCPV4;
uint16_t i = 0;
idx = vq->vq_desc_head_idx;
- start_dp = vq->vq_ring.desc;
+ start_dp = vq->vq_split.ring.desc;
while (i < num) {
idx = idx & (vq->vq_nentries - 1);
- dxp = &vq->vq_descx[idx];
+ dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
dxp->cookie = (void *)cookies[i];
dxp->ndescs = 1;
+ virtio_update_packet_stats(&txvq->stats, cookies[i]);
- hdr = (struct virtio_net_hdr *)
- rte_pktmbuf_prepend(cookies[i], head_size);
- cookies[i]->pkt_len -= head_size;
+ hdr = (struct virtio_net_hdr *)((char *)cookies[i]->buf_addr +
+ cookies[i]->data_off - head_size);
- /* if offload disabled, it is not zeroed below, do it now */
- if (!vq->hw->has_tx_offload) {
- ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
- ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
- ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
- ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
- ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
- ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
- }
-
- virtqueue_xmit_offload(hdr, cookies[i],
- vq->hw->has_tx_offload);
+ /* if offload disabled, hdr is not zeroed yet, do it now */
+ if (!vq->hw->has_tx_offload)
+ virtqueue_clear_net_hdr(hdr);
+ else
+ virtqueue_xmit_offload(hdr, cookies[i], true);
- start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
- start_dp[idx].len = cookies[i]->data_len;
+ start_dp[idx].addr =
+ VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
+ start_dp[idx].len = cookies[i]->data_len + head_size;
start_dp[idx].flags = 0;
+
vq_update_avail_ring(vq, idx);
idx++;
vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
}
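+/* Tx fast path for a single-segment mbuf whose virtio-net header can be
+ * pushed into the headroom (can_push): consumes exactly one packed
+ * descriptor and skips the chained-descriptor handling below.
+ */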
+static inline void
+virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
+ struct rte_mbuf *cookie,
+ int in_order)
+{
+ struct virtqueue *vq = txvq->vq;
+ struct vring_packed_desc *dp;
+ struct vq_desc_extra *dxp;
+ uint16_t idx, id, flags;
+ uint16_t head_size = vq->hw->vtnet_hdr_size;
+ struct virtio_net_hdr *hdr;
+
+ id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
+ idx = vq->vq_avail_idx;
+ dp = &vq->vq_packed.ring.desc[idx];
+
+ dxp = &vq->vq_descx[id];
+ dxp->ndescs = 1;
+ dxp->cookie = cookie;
+
+ flags = vq->vq_packed.cached_flags;
+
+ /* prepend cannot fail, checked by caller */
+ hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
+ cookie->data_off - head_size);
+
+ /* if offload disabled, hdr is not zeroed yet, do it now */
+ if (!vq->hw->has_tx_offload)
+ virtqueue_clear_net_hdr(hdr);
+ else
+ virtqueue_xmit_offload(hdr, cookie, true);
+
+ dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
+ dp->len = cookie->data_len + head_size;
+ dp->id = id;
+
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
+ }
+
+ vq->vq_free_cnt--;
+
+ if (!in_order) {
+ vq->vq_desc_head_idx = dxp->next;
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
+ }
+
+ virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
+}
+
static inline void
virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
- uint16_t needed, int can_push)
+ uint16_t needed, int can_push, int in_order)
{
struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
struct vq_desc_extra *dxp;
uint16_t head_size = vq->hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
uint16_t prev;
+ bool prepend_header = false;
- id = vq->vq_desc_head_idx;
+ id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
dxp = &vq->vq_descx[id];
dxp->ndescs = needed;
head_idx = vq->vq_avail_idx;
idx = head_idx;
prev = head_idx;
- start_dp = vq->ring_packed.desc_packed;
+ start_dp = vq->vq_packed.ring.desc;
- head_dp = &vq->ring_packed.desc_packed[idx];
+ head_dp = &vq->vq_packed.ring.desc[idx];
head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
- head_flags |= vq->avail_used_flags;
+ head_flags |= vq->vq_packed.cached_flags;
if (can_push) {
/* prepend cannot fail, checked by caller */
- hdr = (struct virtio_net_hdr *)
- rte_pktmbuf_prepend(cookie, head_size);
- /* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
- * which is wrong. Below subtract restores correct pkt size.
- */
- cookie->pkt_len -= head_size;
+ hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
+ cookie->data_off - head_size);
+ prepend_header = true;
/* if offload disabled, it is not zeroed below, do it now */
- if (!vq->hw->has_tx_offload) {
- ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
- ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
- ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
- ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
- ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
- ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
- }
+ if (!vq->hw->has_tx_offload)
+ virtqueue_clear_net_hdr(hdr);
} else {
/* setup first tx ring slot to point to header
* stored in reserved region.
idx++;
if (idx >= vq->vq_nentries) {
idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
- vq->avail_used_flags =
- VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ vq->vq_packed.cached_flags ^=
+ VRING_PACKED_DESC_F_AVAIL_USED;
}
}
start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
start_dp[idx].len = cookie->data_len;
+ if (prepend_header) {
+ start_dp[idx].addr -= head_size;
+ start_dp[idx].len += head_size;
+ prepend_header = false;
+ }
+
if (likely(idx != head_idx)) {
flags = cookie->next ? VRING_DESC_F_NEXT : 0;
- flags |= vq->avail_used_flags;
+ flags |= vq->vq_packed.cached_flags;
start_dp[idx].flags = flags;
}
prev = idx;
idx++;
if (idx >= vq->vq_nentries) {
idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
- vq->avail_used_flags =
- VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ vq->vq_packed.cached_flags ^=
+ VRING_PACKED_DESC_F_AVAIL_USED;
}
} while ((cookie = cookie->next) != NULL);
start_dp[prev].id = id;
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
-
- vq->vq_desc_head_idx = dxp->next;
- if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
- vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
-
vq->vq_avail_idx = idx;
- virtio_wmb(vq->hw->weak_barriers);
- head_dp->flags = head_flags;
+ if (!in_order) {
+ vq->vq_desc_head_idx = dxp->next;
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
+ }
+
+ virtqueue_store_flags_packed(head_dp, head_flags,
+ vq->hw->weak_barriers);
}
static inline void
uint16_t seg_num = cookie->nb_segs;
uint16_t head_idx, idx;
uint16_t head_size = vq->hw->vtnet_hdr_size;
+ bool prepend_header = false;
struct virtio_net_hdr *hdr;
head_idx = vq->vq_desc_head_idx;
idx = head_idx;
- dxp = &vq->vq_descx[idx];
+ if (in_order)
+ dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
+ else
+ dxp = &vq->vq_descx[idx];
dxp->cookie = (void *)cookie;
dxp->ndescs = needed;
- start_dp = vq->vq_ring.desc;
+ start_dp = vq->vq_split.ring.desc;
if (can_push) {
/* prepend cannot fail, checked by caller */
- hdr = (struct virtio_net_hdr *)
- rte_pktmbuf_prepend(cookie, head_size);
- /* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
- * which is wrong. Below subtract restores correct pkt size.
- */
- cookie->pkt_len -= head_size;
+ hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
+ cookie->data_off - head_size);
+ prepend_header = true;
/* if offload disabled, it is not zeroed below, do it now */
- if (!vq->hw->has_tx_offload) {
- ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
- ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
- ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
- ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
- ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
- ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
- }
+ if (!vq->hw->has_tx_offload)
+ virtqueue_clear_net_hdr(hdr);
} else if (use_indirect) {
/* setup tx ring slot to point to indirect
* descriptor list stored in reserved region.
do {
start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
start_dp[idx].len = cookie->data_len;
+ if (prepend_header) {
+ start_dp[idx].addr -= head_size;
+ start_dp[idx].len += head_size;
+ prepend_header = false;
+ }
start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
idx = start_dp[idx].next;
} while ((cookie = cookie->next) != NULL);
if (use_indirect)
- idx = vq->vq_ring.desc[head_idx].next;
+ idx = vq->vq_split.ring.desc[head_idx].next;
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
+ const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
PMD_INIT_FUNC_TRACE();
+ if (rx_conf->rx_deferred_start) {
+ PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
+ return -EINVAL;
+ }
+
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
nb_desc = vq->vq_nentries;
vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
rxvq = &vq->rxq;
rxvq->queue_id = queue_idx;
rxvq->mpool = mp;
- if (rxvq->mpool == NULL) {
- rte_exit(EXIT_FAILURE,
- "Cannot allocate mbufs for rx virtqueue");
- }
-
dev->data->rx_queues[queue_idx] = rxvq;
return 0;
if (hw->use_simple_rx) {
for (desc_idx = 0; desc_idx < vq->vq_nentries;
desc_idx++) {
- vq->vq_ring.avail->ring[desc_idx] = desc_idx;
- vq->vq_ring.desc[desc_idx].flags =
+ vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
+ vq->vq_split.ring.desc[desc_idx].flags =
VRING_DESC_F_WRITE;
}
PMD_INIT_FUNC_TRACE();
+ if (tx_conf->tx_deferred_start) {
+ PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
+ return -EINVAL;
+ }
+
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
nb_desc = vq->vq_nentries;
vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
if (!vtpci_packed_queue(hw)) {
if (hw->use_inorder_tx)
- vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
+ vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
}
VIRTQUEUE_DUMP(vq);
}
}
-static inline void
-virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
-{
- uint32_t s = mbuf->pkt_len;
- struct ether_addr *ea;
-
- stats->bytes += s;
-
- if (s == 64) {
- stats->size_bins[1]++;
- } else if (s > 64 && s < 1024) {
- uint32_t bin;
-
- /* count zeros, and offset into correct bin */
- bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
- stats->size_bins[bin]++;
- } else {
- if (s < 64)
- stats->size_bins[0]++;
- else if (s < 1519)
- stats->size_bins[6]++;
- else if (s >= 1519)
- stats->size_bins[7]++;
- }
-
- ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
- if (is_multicast_ether_addr(ea)) {
- if (is_broadcast_ether_addr(ea))
- stats->broadcast++;
- else
- stats->multicast++;
- }
-}
-
-static inline void
-virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
-{
- VIRTIO_DUMP_PACKET(m, m->data_len);
-
- virtio_update_packet_stats(&rxvq->stats, m);
-}
-
/* Optionally fill offload information in structure */
static inline int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw = vq->hw;
- struct rte_mbuf *rxm, *new_mbuf;
+ struct rte_mbuf *rxm;
uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.packets += nb_rx;
/* Allocate new mbuf for the used descriptor */
- while (likely(!virtqueue_full(vq))) {
- new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
- if (unlikely(new_mbuf == NULL)) {
- struct rte_eth_dev *dev
- = &rte_eth_devices[rxvq->port_id];
- dev->data->rx_mbuf_alloc_failed++;
- break;
- }
- error = virtqueue_enqueue_recv_refill(vq, &new_mbuf, 1);
- if (unlikely(error)) {
- rte_pktmbuf_free(new_mbuf);
- break;
+ if (likely(!virtqueue_full(vq))) {
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *new_pkts[free_cnt];
+
+ if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
+ free_cnt) == 0)) {
+ error = virtqueue_enqueue_recv_refill(vq, new_pkts,
+ free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(new_pkts[i]);
+ }
+ nb_enqueued += free_cnt;
+ } else {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed += free_cnt;
}
- nb_enqueued++;
}
if (likely(nb_enqueued)) {
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw = vq->hw;
- struct rte_mbuf *rxm, *new_mbuf;
+ struct rte_mbuf *rxm;
uint16_t num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.packets += nb_rx;
/* Allocate new mbuf for the used descriptor */
- while (likely(!virtqueue_full(vq))) {
- new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
- if (unlikely(new_mbuf == NULL)) {
+ if (likely(!virtqueue_full(vq))) {
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *new_pkts[free_cnt];
+
+ if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
+ free_cnt) == 0)) {
+ error = virtqueue_enqueue_recv_refill_packed(vq,
+ new_pkts, free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(new_pkts[i]);
+ }
+ nb_enqueued += free_cnt;
+ } else {
struct rte_eth_dev *dev =
&rte_eth_devices[rxvq->port_id];
- dev->data->rx_mbuf_alloc_failed++;
- break;
- }
- error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1);
- if (unlikely(error)) {
- rte_pktmbuf_free(new_mbuf);
- break;
+ dev->data->rx_mbuf_alloc_failed += free_cnt;
}
- nb_enqueued++;
}
if (likely(nb_enqueued)) {
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
- struct rte_mbuf *prev;
+ struct rte_mbuf *prev = NULL;
uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf_inorder(vq, rxm);
rxm->data_len = (uint16_t)(len[i]);
rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
- rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
- if (prev)
- prev->next = rxm;
+ prev->next = rxm;
prev = rxm;
seg_res -= 1;
}
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- prev = rcv_pkts[nb_rx];
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
virtio_rmb(hw->weak_barriers);
num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
prev->next = rxm;
prev = rxm;
rx_pkts[nb_rx]->pkt_len += len[extra_idx];
- rx_pkts[nb_rx]->data_len += len[extra_idx];
extra_idx += 1;
};
seg_res -= rcv_cnt;
} else {
PMD_RX_LOG(ERR,
"No enough segments for packet.");
- virtio_discard_rxbuf_inorder(vq, prev);
+ rte_pktmbuf_free(rx_pkts[nb_rx]);
rxvq->stats.errors++;
break;
}
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
- struct rte_mbuf *prev;
+ struct rte_mbuf *prev = NULL;
uint16_t nb_used, num, nb_rx = 0;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxm->data_len = (uint16_t)(len[i]);
rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
- rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
- if (prev)
- prev->next = rxm;
+ prev->next = rxm;
prev = rxm;
seg_res -= 1;
}
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- prev = rcv_pkts[nb_rx];
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
virtio_rmb(hw->weak_barriers);
num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
prev->next = rxm;
prev = rxm;
rx_pkts[nb_rx]->pkt_len += len[extra_idx];
- rx_pkts[nb_rx]->data_len += len[extra_idx];
extra_idx += 1;
};
seg_res -= rcv_cnt;
} else {
PMD_RX_LOG(ERR,
"No enough segments for packet.");
- virtio_discard_rxbuf(vq, prev);
+ rte_pktmbuf_free(rx_pkts[nb_rx]);
rxvq->stats.errors++;
break;
}
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxm->data_len = (uint16_t)(len[i]);
rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
- rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
- if (prev)
- prev->next = rxm;
+ prev->next = rxm;
prev = rxm;
seg_res -= 1;
}
while (seg_res != 0) {
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- if (likely(vq->vq_free_cnt >= rcv_cnt)) {
- num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
- len, rcv_cnt);
- uint16_t extra_idx = 0;
+ uint16_t extra_idx = 0;
- rcv_cnt = num;
+ rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
+ len, rcv_cnt);
+ if (unlikely(rcv_cnt == 0)) {
+ PMD_RX_LOG(ERR, "No enough segments for packet.");
+ rte_pktmbuf_free(rx_pkts[nb_rx]);
+ rxvq->stats.errors++;
+ break;
+ }
- while (extra_idx < rcv_cnt) {
- rxm = rcv_pkts[extra_idx];
+ while (extra_idx < rcv_cnt) {
+ rxm = rcv_pkts[extra_idx];
- rxm->data_off =
- RTE_PKTMBUF_HEADROOM - hdr_size;
- rxm->pkt_len = (uint32_t)(len[extra_idx]);
- rxm->data_len = (uint16_t)(len[extra_idx]);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
- prev->next = rxm;
- prev = rxm;
- rx_pkts[nb_rx]->pkt_len += len[extra_idx];
- rx_pkts[nb_rx]->data_len += len[extra_idx];
- extra_idx += 1;
- }
- seg_res -= rcv_cnt;
- if (!seg_res) {
- virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
- nb_rx++;
- }
- } else {
- PMD_RX_LOG(ERR,
- "No enough segments for packet.");
- if (prev)
- virtio_discard_rxbuf(vq, prev);
- rxvq->stats.errors++;
- break;
+ prev->next = rxm;
+ prev = rxm;
+ rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+ extra_idx += 1;
+ }
+ seg_res -= rcv_cnt;
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
}
}
return nb_rx;
}
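+/* Tx prepare handler: VLAN insertion, checksum pseudo-header setup and the
+ * TSO checksum fixup are done here once per packet (presumably wired up as
+ * the ethdev tx_pkt_prepare callback elsewhere in the driver), so the burst
+ * transmit paths below no longer need to do them.
+ */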
+uint16_t
+virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+ int error;
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *m = tx_pkts[nb_tx];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ error = rte_validate_tx_offload(m);
+ if (unlikely(error)) {
+ rte_errno = -error;
+ break;
+ }
+#endif
+
+ /* Do VLAN tag insertion */
+ if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&m);
+ /* rte_vlan_insert() may change pointer
+ * even in the case of failure
+ */
+ tx_pkts[nb_tx] = m;
+
+ if (unlikely(error)) {
+ rte_errno = -error;
+ break;
+ }
+ }
+
+ error = rte_net_intel_cksum_prepare(m);
+ if (unlikely(error)) {
+ rte_errno = -error;
+ break;
+ }
+
+ if (m->ol_flags & PKT_TX_TCP_SEG)
+ virtio_tso_fix_cksum(m);
+ }
+
+ return nb_tx;
+}
+
uint16_t
virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_tx = 0;
- int error;
+ bool in_order = hw->use_inorder_tx;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
if (nb_pkts > vq->vq_free_cnt)
- virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt);
+ virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
+ in_order);
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
struct rte_mbuf *txm = tx_pkts[nb_tx];
int can_push = 0, slots, need;
- /* Do VLAN tag insertion */
- if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
- error = rte_vlan_insert(&txm);
- if (unlikely(error)) {
- rte_pktmbuf_free(txm);
- continue;
- }
- }
-
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
/* Positive value indicates it need free vring descriptors */
if (unlikely(need > 0)) {
- need = RTE_MIN(need, (int)nb_pkts);
- virtio_xmit_cleanup_packed(vq, need);
+ virtio_xmit_cleanup_packed(vq, need, in_order);
need = slots - vq->vq_free_cnt;
if (unlikely(need > 0)) {
PMD_TX_LOG(ERR,
}
/* Enqueue Packet buffers */
- virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push);
+ if (can_push)
+ virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
+ else
+ virtqueue_enqueue_xmit_packed(txvq, txm, slots, 0,
+ in_order);
virtio_update_packet_stats(&txvq->stats, txm);
}
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_used, nb_tx = 0;
- int error;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
struct rte_mbuf *txm = tx_pkts[nb_tx];
int can_push = 0, use_indirect = 0, slots, need;
- /* Do VLAN tag insertion */
- if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
- error = rte_vlan_insert(&txm);
- if (unlikely(error)) {
- rte_pktmbuf_free(txm);
- continue;
- }
- }
-
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
return nb_tx;
}
+static __rte_always_inline int
+virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
+{
+ uint16_t nb_used, nb_clean, nb_descs;
+ struct virtio_hw *hw = vq->hw;
+
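+ /* Try to reclaim `need` descriptors; returns how many are still
+ * missing afterwards (<= 0 when enough have been freed).
+ */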
+ nb_descs = vq->vq_free_cnt + need;
+ nb_used = VIRTQUEUE_NUSED(vq);
+ virtio_rmb(hw->weak_barriers);
+ nb_clean = RTE_MIN(need, (int)nb_used);
+
+ virtio_xmit_cleanup_inorder(vq, nb_clean);
+
+ return nb_descs - vq->vq_free_cnt;
+}
+
uint16_t
virtio_xmit_pkts_inorder(void *tx_queue,
struct rte_mbuf **tx_pkts,
struct virtqueue *vq = txvq->vq;
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
- uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+ uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
struct rte_mbuf *inorder_pkts[nb_pkts];
- int error;
+ int need;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
virtio_xmit_cleanup_inorder(vq, nb_used);
- if (unlikely(!vq->vq_free_cnt))
- virtio_xmit_cleanup_inorder(vq, nb_used);
-
- nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
-
- for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
struct rte_mbuf *txm = tx_pkts[nb_tx];
- int slots, need;
-
- /* Do VLAN tag insertion */
- if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
- error = rte_vlan_insert(&txm);
- if (unlikely(error)) {
- rte_pktmbuf_free(txm);
- continue;
- }
- }
+ int slots;
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
inorder_pkts[nb_inorder_pkts] = txm;
nb_inorder_pkts++;
- virtio_update_packet_stats(&txvq->stats, txm);
continue;
}
if (nb_inorder_pkts) {
+ need = nb_inorder_pkts - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ need = virtio_xmit_try_cleanup_inorder(vq,
+ need);
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to "
+ "transmit");
+ break;
+ }
+ }
virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
nb_inorder_pkts);
nb_inorder_pkts = 0;
slots = txm->nb_segs + 1;
need = slots - vq->vq_free_cnt;
if (unlikely(need > 0)) {
- nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb(hw->weak_barriers);
- need = RTE_MIN(need, (int)nb_used);
-
- virtio_xmit_cleanup_inorder(vq, need);
-
- need = slots - vq->vq_free_cnt;
+ need = virtio_xmit_try_cleanup_inorder(vq, slots);
if (unlikely(need > 0)) {
PMD_TX_LOG(ERR,
}
/* Transmit all inorder packets */
- if (nb_inorder_pkts)
+ if (nb_inorder_pkts) {
+ need = nb_inorder_pkts - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ need = virtio_xmit_try_cleanup_inorder(vq,
+ need);
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ nb_inorder_pkts = vq->vq_free_cnt;
+ nb_tx -= need;
+ }
+ }
+
virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
nb_inorder_pkts);
+ }
txvq->stats.packets += nb_tx;