/* TCP Segmentation Offload */
if (cookie->ol_flags & PKT_TX_TCP_SEG) {
- virtio_tso_fix_cksum(cookie);
hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
VIRTIO_NET_HDR_GSO_TCPV6 :
VIRTIO_NET_HDR_GSO_TCPV4;
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
- struct rte_mbuf *prev;
+ struct rte_mbuf *prev = NULL;
uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
rxm->data_len = (uint16_t)(len[i]);
rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
- rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
- if (prev)
- prev->next = rxm;
+ prev->next = rxm;
prev = rxm;
seg_res -= 1;
}
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- prev = rcv_pkts[nb_rx];
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
virtio_rmb(hw->weak_barriers);
num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
prev->next = rxm;
prev = rxm;
rx_pkts[nb_rx]->pkt_len += len[extra_idx];
- rx_pkts[nb_rx]->data_len += len[extra_idx];
extra_idx += 1;
};
seg_res -= rcv_cnt;
} else {
PMD_RX_LOG(ERR,
"No enough segments for packet.");
- virtio_discard_rxbuf_inorder(vq, prev);
+ rte_pktmbuf_free(rx_pkts[nb_rx]);
rxvq->stats.errors++;
break;
}
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
- struct rte_mbuf *prev;
+ struct rte_mbuf *prev = NULL;
uint16_t nb_used, num, nb_rx = 0;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
rxm->data_len = (uint16_t)(len[i]);
rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
- rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
- if (prev)
- prev->next = rxm;
+ prev->next = rxm;
prev = rxm;
seg_res -= 1;
}
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- prev = rcv_pkts[nb_rx];
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
virtio_rmb(hw->weak_barriers);
num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
prev->next = rxm;
prev = rxm;
rx_pkts[nb_rx]->pkt_len += len[extra_idx];
- rx_pkts[nb_rx]->data_len += len[extra_idx];
extra_idx += 1;
};
seg_res -= rcv_cnt;
} else {
PMD_RX_LOG(ERR,
"No enough segments for packet.");
- virtio_discard_rxbuf(vq, prev);
+ rte_pktmbuf_free(rx_pkts[nb_rx]);
rxvq->stats.errors++;
break;
}
rxm->data_len = (uint16_t)(len[i]);
rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
- rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
- if (prev)
- prev->next = rxm;
+ prev->next = rxm;
prev = rxm;
seg_res -= 1;
}
while (seg_res != 0) {
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- if (likely(vq->vq_free_cnt >= rcv_cnt)) {
- num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
- len, rcv_cnt);
- uint16_t extra_idx = 0;
+ uint16_t extra_idx = 0;
- rcv_cnt = num;
+ rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
+ len, rcv_cnt);
+ if (unlikely(rcv_cnt == 0)) {
+ PMD_RX_LOG(ERR, "No enough segments for packet.");
+ rte_pktmbuf_free(rx_pkts[nb_rx]);
+ rxvq->stats.errors++;
+ break;
+ }
- while (extra_idx < rcv_cnt) {
- rxm = rcv_pkts[extra_idx];
+ while (extra_idx < rcv_cnt) {
+ rxm = rcv_pkts[extra_idx];
- rxm->data_off =
- RTE_PKTMBUF_HEADROOM - hdr_size;
- rxm->pkt_len = (uint32_t)(len[extra_idx]);
- rxm->data_len = (uint16_t)(len[extra_idx]);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
- prev->next = rxm;
- prev = rxm;
- rx_pkts[nb_rx]->pkt_len += len[extra_idx];
- rx_pkts[nb_rx]->data_len += len[extra_idx];
- extra_idx += 1;
- }
- seg_res -= rcv_cnt;
- if (!seg_res) {
- virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
- nb_rx++;
- }
- } else {
- PMD_RX_LOG(ERR,
- "No enough segments for packet.");
- if (prev)
- virtio_discard_rxbuf(vq, prev);
- rxvq->stats.errors++;
- break;
+ prev->next = rxm;
+ prev = rxm;
+ rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+ extra_idx += 1;
+ }
+ seg_res -= rcv_cnt;
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
}
}
return nb_rx;
}
+/*
+ * Ethdev tx_prepare callback for virtio: sanitize a burst of mbufs
+ * before they are handed to any of the virtio_xmit_pkts_* paths.
+ *
+ * Per packet this: validates tx offload flags (debug builds only),
+ * software-inserts the VLAN tag when PKT_TX_VLAN_PKT is set, runs
+ * rte_net_intel_cksum_prepare() (presumably to put the checksum
+ * fields into the pseudo-header form the host expects — confirm
+ * against the virtio spec), and fixes the TCP checksum for TSO
+ * packets via virtio_tso_fix_cksum().
+ *
+ * tx_queue is unused (prepare does not touch the ring).
+ * Returns the number of packets successfully prepared; processing
+ * stops at the first failure with rte_errno set to the positive
+ * error code, leaving the remaining packets untouched.
+ */
+uint16_t
+virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	uint16_t nb_tx;
+	int error;
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		struct rte_mbuf *m = tx_pkts[nb_tx];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		/* Offload-flag sanity check is compiled in only for
+		 * debug builds; helpers return a negative errno.
+		 */
+		error = rte_validate_tx_offload(m);
+		if (unlikely(error)) {
+			rte_errno = -error;
+			break;
+		}
+#endif
+
+		/* Do VLAN tag insertion */
+		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+			error = rte_vlan_insert(&m);
+			/* rte_vlan_insert() may change pointer
+			 * even in the case of failure
+			 */
+			tx_pkts[nb_tx] = m;
+
+			if (unlikely(error)) {
+				rte_errno = -error;
+				break;
+			}
+		}
+
+		error = rte_net_intel_cksum_prepare(m);
+		if (unlikely(error)) {
+			rte_errno = -error;
+			break;
+		}
+
+		/* TSO packets need the TCP checksum seeded with the
+		 * pseudo-header value before the device segments them.
+		 */
+		if (m->ol_flags & PKT_TX_TCP_SEG)
+			virtio_tso_fix_cksum(m);
+	}
+
+	return nb_tx;
+}
+
uint16_t
virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_tx = 0;
bool in_order = hw->use_inorder_tx;
- int error;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
struct rte_mbuf *txm = tx_pkts[nb_tx];
int can_push = 0, slots, need;
- /* Do VLAN tag insertion */
- if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
- error = rte_vlan_insert(&txm);
- if (unlikely(error)) {
- rte_pktmbuf_free(txm);
- continue;
- }
- /* vlan_insert may add a header mbuf */
- tx_pkts[nb_tx] = txm;
- }
-
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_used, nb_tx = 0;
- int error;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
struct rte_mbuf *txm = tx_pkts[nb_tx];
int can_push = 0, use_indirect = 0, slots, need;
- /* Do VLAN tag insertion */
- if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
- error = rte_vlan_insert(&txm);
- if (unlikely(error)) {
- rte_pktmbuf_free(txm);
- continue;
- }
- /* vlan_insert may add a header mbuf */
- tx_pkts[nb_tx] = txm;
- }
-
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
struct rte_mbuf *inorder_pkts[nb_pkts];
- int error;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
struct rte_mbuf *txm = tx_pkts[nb_tx];
int slots, need;
- /* Do VLAN tag insertion */
- if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
- error = rte_vlan_insert(&txm);
- if (unlikely(error)) {
- rte_pktmbuf_free(txm);
- continue;
- }
- /* vlan_insert may add a header mbuf */
- tx_pkts[nb_tx] = txm;
- }
-
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&