dxp->next = VQ_RING_DESC_CHAIN_END;
}
+void
+virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+ uint32_t s = mbuf->pkt_len;
+ struct rte_ether_addr *ea;
+
+ stats->bytes += s;
+
+ if (s == 64) {
+ stats->size_bins[1]++;
+ } else if (s > 64 && s < 1024) {
+ uint32_t bin;
+
+ /* count zeros, and offset into correct bin */
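+ /* e.g. 65..127 -> bin 2, 128..255 -> bin 3,
+ * 256..511 -> bin 4, 512..1023 -> bin 5
+ */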
+ bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+ stats->size_bins[bin]++;
+ } else {
+ if (s < 64)
+ stats->size_bins[0]++;
+ else if (s < 1519)
+ stats->size_bins[6]++;
+ else
+ stats->size_bins[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
+ stats->broadcast++;
+ else
+ stats->multicast++;
+ }
+}
+
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+ VIRTIO_DUMP_PACKET(m, m->data_len);
+
+ virtio_update_packet_stats(&rxvq->stats, m);
+}
+
static uint16_t
virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
struct rte_mbuf **rx_pkts,
for (i = 0; i < num; i++) {
used_idx = vq->vq_used_cons_idx;
+ /* desc_is_used has a load-acquire or rte_cio_rmb inside
+ * and waits for the used descriptor in the virtqueue.
+ */
if (!desc_is_used(&desc[used_idx], vq))
return i;
- virtio_rmb(vq->hw->weak_barriers);
len[i] = desc[used_idx].len;
id = desc[used_idx].id;
cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
struct vq_desc_extra *dxp;
used_idx = vq->vq_used_cons_idx;
+ /* desc_is_used has a load-acquire or rte_cio_rmb inside
+ * and waits for the used descriptor in the virtqueue.
+ */
while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
- virtio_rmb(vq->hw->weak_barriers);
id = desc[used_idx].id;
do {
curr_id = used_idx;
struct vq_desc_extra *dxp;
used_idx = vq->vq_used_cons_idx;
+ /* desc_is_used has a load-acquire or rte_cio_rmb inside
+ * and waits for the used descriptor in the virtqueue.
+ */
while (num-- && desc_is_used(&desc[used_idx], vq)) {
- virtio_rmb(vq->hw->weak_barriers);
id = desc[used_idx].id;
dxp = &vq->vq_descx[id];
vq->vq_used_cons_idx += dxp->ndescs;
}
/* Cleanup from completed inorder transmits. */
-static void
+static __rte_always_inline void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
uint16_t i, idx = vq->vq_used_cons_idx;
vq->vq_desc_head_idx = dxp->next;
if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
- virtio_wmb(hw->weak_barriers);
- start_dp[idx].flags = flags;
+
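+ /* Write the descriptor flags last: with weak barriers this is a
+ * store-release, so the device cannot observe the flags before
+ * the rest of the descriptor is written.
+ */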
+ virtqueue_store_flags_packed(&start_dp[idx], flags,
+ hw->weak_barriers);
+
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
vq->vq_packed.cached_flags ^=
- VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+ VRING_PACKED_DESC_F_AVAIL_USED;
flags = vq->vq_packed.cached_flags;
}
}
/* common case: header is not fragmented */
if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
m->l4_len)) {
- struct ipv4_hdr *iph;
- struct ipv6_hdr *ip6h;
- struct tcp_hdr *th;
+ struct rte_ipv4_hdr *iph;
+ struct rte_ipv6_hdr *ip6h;
+ struct rte_tcp_hdr *th;
uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
uint32_t tmp;
- iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ iph = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv4_hdr *, m->l2_len);
th = RTE_PTR_ADD(iph, m->l3_len);
if ((iph->version_ihl >> 4) == 4) {
iph->hdr_checksum = 0;
ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
m->l3_len);
} else {
- ip6h = (struct ipv6_hdr *)iph;
+ ip6h = (struct rte_ipv6_hdr *)iph;
ip_paylen = ip6h->payload_len;
}
switch (cookie->ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct udp_hdr,
+ hdr->csum_offset = offsetof(struct rte_udp_hdr,
dgram_cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
case PKT_TX_TCP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+ hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
/* TCP Segmentation Offload */
if (cookie->ol_flags & PKT_TX_TCP_SEG) {
- virtio_tso_fix_cksum(cookie);
hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
VIRTIO_NET_HDR_GSO_TCPV6 :
VIRTIO_NET_HDR_GSO_TCPV4;
dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
dxp->cookie = (void *)cookies[i];
dxp->ndescs = 1;
+ virtio_update_packet_stats(&txvq->stats, cookies[i]);
- hdr = (struct virtio_net_hdr *)
- rte_pktmbuf_prepend(cookies[i], head_size);
- cookies[i]->pkt_len -= head_size;
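+ /* Build the header in the mbuf headroom instead of prepending,
+ * so pkt_len and data_len keep describing the packet only; the
+ * descriptor below covers the extra head_size bytes in front.
+ */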
+ hdr = (struct virtio_net_hdr *)((char *)cookies[i]->buf_addr +
+ cookies[i]->data_off - head_size);
/* if offload disabled, hdr is not zeroed yet, do it now */
if (!vq->hw->has_tx_offload)
else
virtqueue_xmit_offload(hdr, cookies[i], true);
- start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
- start_dp[idx].len = cookies[i]->data_len;
+ start_dp[idx].addr =
+ VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
+ start_dp[idx].len = cookies[i]->data_len + head_size;
start_dp[idx].flags = 0;
+
vq_update_avail_ring(vq, idx);
idx++;
flags = vq->vq_packed.cached_flags;
/* prepend cannot fail, checked by caller */
- hdr = (struct virtio_net_hdr *)
- rte_pktmbuf_prepend(cookie, head_size);
- cookie->pkt_len -= head_size;
+ hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
+ cookie->data_off - head_size);
/* if offload disabled, hdr is not zeroed yet, do it now */
if (!vq->hw->has_tx_offload)
else
virtqueue_xmit_offload(hdr, cookie, true);
- dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
- dp->len = cookie->data_len;
+ dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
+ dp->len = cookie->data_len + head_size;
dp->id = id;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^=
- VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+ vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
}
vq->vq_free_cnt--;
vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
}
- virtio_wmb(vq->hw->weak_barriers);
- dp->flags = flags;
+ virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
}
static inline void
uint16_t head_size = vq->hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
uint16_t prev;
+ bool prepend_header = false;
id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
if (can_push) {
/* prepend cannot fail, checked by caller */
- hdr = (struct virtio_net_hdr *)
- rte_pktmbuf_prepend(cookie, head_size);
- /* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
- * which is wrong. Below subtract restores correct pkt size.
- */
- cookie->pkt_len -= head_size;
+ hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
+ cookie->data_off - head_size);
+ prepend_header = true;
/* if offload disabled, it is not zeroed below, do it now */
if (!vq->hw->has_tx_offload)
if (idx >= vq->vq_nentries) {
idx -= vq->vq_nentries;
vq->vq_packed.cached_flags ^=
- VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+ VRING_PACKED_DESC_F_AVAIL_USED;
}
}
start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
start_dp[idx].len = cookie->data_len;
+ if (prepend_header) {
+ start_dp[idx].addr -= head_size;
+ start_dp[idx].len += head_size;
+ prepend_header = false;
+ }
+
if (likely(idx != head_idx)) {
flags = cookie->next ? VRING_DESC_F_NEXT : 0;
flags |= vq->vq_packed.cached_flags;
if (idx >= vq->vq_nentries) {
idx -= vq->vq_nentries;
vq->vq_packed.cached_flags ^=
- VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+ VRING_PACKED_DESC_F_AVAIL_USED;
}
} while ((cookie = cookie->next) != NULL);
vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
}
- virtio_wmb(vq->hw->weak_barriers);
- head_dp->flags = head_flags;
+ virtqueue_store_flags_packed(head_dp, head_flags,
+ vq->hw->weak_barriers);
}
static inline void
uint16_t seg_num = cookie->nb_segs;
uint16_t head_idx, idx;
uint16_t head_size = vq->hw->vtnet_hdr_size;
+ bool prepend_header = false;
struct virtio_net_hdr *hdr;
head_idx = vq->vq_desc_head_idx;
if (can_push) {
/* prepend cannot fail, checked by caller */
- hdr = (struct virtio_net_hdr *)
- rte_pktmbuf_prepend(cookie, head_size);
- /* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
- * which is wrong. Below subtract restores correct pkt size.
- */
- cookie->pkt_len -= head_size;
+ hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
+ cookie->data_off - head_size);
+ prepend_header = true;
/* if offload disabled, it is not zeroed below, do it now */
if (!vq->hw->has_tx_offload)
do {
start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
start_dp[idx].len = cookie->data_len;
+ if (prepend_header) {
+ start_dp[idx].addr -= head_size;
+ start_dp[idx].len += head_size;
+ prepend_header = false;
+ }
start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
idx = start_dp[idx].next;
} while ((cookie = cookie->next) != NULL);
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
+ const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
PMD_INIT_FUNC_TRACE();
+ if (rx_conf->rx_deferred_start) {
+ PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
+ return -EINVAL;
+ }
+
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
nb_desc = vq->vq_nentries;
vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
rxvq = &vq->rxq;
rxvq->queue_id = queue_idx;
rxvq->mpool = mp;
- if (rxvq->mpool == NULL) {
- rte_exit(EXIT_FAILURE,
- "Cannot allocate mbufs for rx virtqueue");
- }
-
dev->data->rx_queues[queue_idx] = rxvq;
return 0;
}
}
-static inline void
-virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
-{
- uint32_t s = mbuf->pkt_len;
- struct ether_addr *ea;
-
- stats->bytes += s;
-
- if (s == 64) {
- stats->size_bins[1]++;
- } else if (s > 64 && s < 1024) {
- uint32_t bin;
-
- /* count zeros, and offset into correct bin */
- bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
- stats->size_bins[bin]++;
- } else {
- if (s < 64)
- stats->size_bins[0]++;
- else if (s < 1519)
- stats->size_bins[6]++;
- else if (s >= 1519)
- stats->size_bins[7]++;
- }
-
- ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
- if (is_multicast_ether_addr(ea)) {
- if (is_broadcast_ether_addr(ea))
- stats->broadcast++;
- else
- stats->multicast++;
- }
-}
-
-static inline void
-virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
-{
- VIRTIO_DUMP_PACKET(m, m->data_len);
-
- virtio_update_packet_stats(&rxvq->stats, m);
-}
-
/* Optionally fill offload information in structure */
static inline int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
- struct rte_mbuf *prev;
+ struct rte_mbuf *prev = NULL;
uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf_inorder(vq, rxm);
rxm->data_len = (uint16_t)(len[i]);
rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
- rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
- if (prev)
- prev->next = rxm;
+ prev->next = rxm;
prev = rxm;
seg_res -= 1;
}
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- prev = rcv_pkts[nb_rx];
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
virtio_rmb(hw->weak_barriers);
num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
prev->next = rxm;
prev = rxm;
rx_pkts[nb_rx]->pkt_len += len[extra_idx];
- rx_pkts[nb_rx]->data_len += len[extra_idx];
extra_idx += 1;
};
seg_res -= rcv_cnt;
} else {
PMD_RX_LOG(ERR,
"No enough segments for packet.");
- virtio_discard_rxbuf_inorder(vq, prev);
+ rte_pktmbuf_free(rx_pkts[nb_rx]);
rxvq->stats.errors++;
break;
}
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
- struct rte_mbuf *prev;
+ struct rte_mbuf *prev = NULL;
uint16_t nb_used, num, nb_rx = 0;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxm->data_len = (uint16_t)(len[i]);
rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
- rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
- if (prev)
- prev->next = rxm;
+ prev->next = rxm;
prev = rxm;
seg_res -= 1;
}
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- prev = rcv_pkts[nb_rx];
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
virtio_rmb(hw->weak_barriers);
num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
prev->next = rxm;
prev = rxm;
rx_pkts[nb_rx]->pkt_len += len[extra_idx];
- rx_pkts[nb_rx]->data_len += len[extra_idx];
extra_idx += 1;
};
seg_res -= rcv_cnt;
} else {
PMD_RX_LOG(ERR,
"No enough segments for packet.");
- virtio_discard_rxbuf(vq, prev);
+ rte_pktmbuf_free(rx_pkts[nb_rx]);
rxvq->stats.errors++;
break;
}
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxm->data_len = (uint16_t)(len[i]);
rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
- rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
- if (prev)
- prev->next = rxm;
+ prev->next = rxm;
prev = rxm;
seg_res -= 1;
}
while (seg_res != 0) {
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- if (likely(vq->vq_free_cnt >= rcv_cnt)) {
- num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
- len, rcv_cnt);
- uint16_t extra_idx = 0;
+ uint16_t extra_idx = 0;
- rcv_cnt = num;
+ rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
+ len, rcv_cnt);
+ if (unlikely(rcv_cnt == 0)) {
+ PMD_RX_LOG(ERR, "No enough segments for packet.");
+ rte_pktmbuf_free(rx_pkts[nb_rx]);
+ rxvq->stats.errors++;
+ break;
+ }
- while (extra_idx < rcv_cnt) {
- rxm = rcv_pkts[extra_idx];
+ while (extra_idx < rcv_cnt) {
+ rxm = rcv_pkts[extra_idx];
- rxm->data_off =
- RTE_PKTMBUF_HEADROOM - hdr_size;
- rxm->pkt_len = (uint32_t)(len[extra_idx]);
- rxm->data_len = (uint16_t)(len[extra_idx]);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
- prev->next = rxm;
- prev = rxm;
- rx_pkts[nb_rx]->pkt_len += len[extra_idx];
- rx_pkts[nb_rx]->data_len += len[extra_idx];
- extra_idx += 1;
- }
- seg_res -= rcv_cnt;
- if (!seg_res) {
- virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
- nb_rx++;
- }
- } else {
- PMD_RX_LOG(ERR,
- "No enough segments for packet.");
- if (prev)
- virtio_discard_rxbuf(vq, prev);
- rxvq->stats.errors++;
- break;
+ prev->next = rxm;
+ prev = rxm;
+ rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+ extra_idx += 1;
+ }
+ seg_res -= rcv_cnt;
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
}
}
return nb_rx;
}
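+/* Validate Tx offloads, insert VLAN tags and patch up checksums
+ * (including the TSO pseudo-header checksum) before the packets
+ * are handed to the Tx burst functions.
+ */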
+uint16_t
+virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+ int error;
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *m = tx_pkts[nb_tx];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ error = rte_validate_tx_offload(m);
+ if (unlikely(error)) {
+ rte_errno = -error;
+ break;
+ }
+#endif
+
+ /* Do VLAN tag insertion */
+ if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&m);
+ /* rte_vlan_insert() may change pointer
+ * even in the case of failure
+ */
+ tx_pkts[nb_tx] = m;
+
+ if (unlikely(error)) {
+ rte_errno = -error;
+ break;
+ }
+ }
+
+ error = rte_net_intel_cksum_prepare(m);
+ if (unlikely(error)) {
+ rte_errno = -error;
+ break;
+ }
+
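+ /* For TSO, virtio expects the TCP checksum field to hold the
+ * pseudo-header checksum; fix it up here once per packet.
+ */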
+ if (m->ol_flags & PKT_TX_TCP_SEG)
+ virtio_tso_fix_cksum(m);
+ }
+
+ return nb_tx;
+}
+
uint16_t
virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_tx = 0;
bool in_order = hw->use_inorder_tx;
- int error;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
struct rte_mbuf *txm = tx_pkts[nb_tx];
int can_push = 0, slots, need;
- /* Do VLAN tag insertion */
- if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
- error = rte_vlan_insert(&txm);
- if (unlikely(error)) {
- rte_pktmbuf_free(txm);
- continue;
- }
- }
-
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_used, nb_tx = 0;
- int error;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
struct rte_mbuf *txm = tx_pkts[nb_tx];
int can_push = 0, use_indirect = 0, slots, need;
- /* Do VLAN tag insertion */
- if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
- error = rte_vlan_insert(&txm);
- if (unlikely(error)) {
- rte_pktmbuf_free(txm);
- continue;
- }
- }
-
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
return nb_tx;
}
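+/* Try to make room for 'need' descriptors by cleaning up completed
+ * in-order transmits; returns how many descriptors are still
+ * missing (<= 0 when enough are free).
+ */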
+static __rte_always_inline int
+virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
+{
+ uint16_t nb_used, nb_clean, nb_descs;
+ struct virtio_hw *hw = vq->hw;
+
+ nb_descs = vq->vq_free_cnt + need;
+ nb_used = VIRTQUEUE_NUSED(vq);
+ virtio_rmb(hw->weak_barriers);
+ nb_clean = RTE_MIN(need, nb_used);
+
+ virtio_xmit_cleanup_inorder(vq, nb_clean);
+
+ return nb_descs - vq->vq_free_cnt;
+}
+
uint16_t
virtio_xmit_pkts_inorder(void *tx_queue,
struct rte_mbuf **tx_pkts,
struct virtqueue *vq = txvq->vq;
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
- uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+ uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
struct rte_mbuf *inorder_pkts[nb_pkts];
- int error;
+ int need;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
virtio_xmit_cleanup_inorder(vq, nb_used);
- if (unlikely(!vq->vq_free_cnt))
- virtio_xmit_cleanup_inorder(vq, nb_used);
-
- nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
-
- for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
struct rte_mbuf *txm = tx_pkts[nb_tx];
- int slots, need;
-
- /* Do VLAN tag insertion */
- if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
- error = rte_vlan_insert(&txm);
- if (unlikely(error)) {
- rte_pktmbuf_free(txm);
- continue;
- }
- }
+ int slots;
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
inorder_pkts[nb_inorder_pkts] = txm;
nb_inorder_pkts++;
- virtio_update_packet_stats(&txvq->stats, txm);
continue;
}
if (nb_inorder_pkts) {
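+ /* The batched in-order packets need one slot each; reclaim
+ * used descriptors first if the ring does not have enough free.
+ */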
+ need = nb_inorder_pkts - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ need = virtio_xmit_try_cleanup_inorder(vq,
+ need);
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to "
+ "transmit");
+ break;
+ }
+ }
virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
nb_inorder_pkts);
nb_inorder_pkts = 0;
slots = txm->nb_segs + 1;
need = slots - vq->vq_free_cnt;
if (unlikely(need > 0)) {
- nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb(hw->weak_barriers);
- need = RTE_MIN(need, (int)nb_used);
-
- virtio_xmit_cleanup_inorder(vq, need);
-
- need = slots - vq->vq_free_cnt;
+ need = virtio_xmit_try_cleanup_inorder(vq, slots);
if (unlikely(need > 0)) {
PMD_TX_LOG(ERR,
}
/* Transmit all inorder packets */
- if (nb_inorder_pkts)
+ if (nb_inorder_pkts) {
+ need = nb_inorder_pkts - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ need = virtio_xmit_try_cleanup_inorder(vq,
+ need);
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ nb_inorder_pkts = vq->vq_free_cnt;
+ nb_tx -= need;
+ }
+ }
+
virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
nb_inorder_pkts);
+ }
txvq->stats.packets += nb_tx;