/* common case: header is not fragmented */
if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
m->l4_len)) {
- struct ipv4_hdr *iph;
- struct ipv6_hdr *ip6h;
- struct tcp_hdr *th;
+ struct rte_ipv4_hdr *iph;
+ struct rte_ipv6_hdr *ip6h;
+ struct rte_tcp_hdr *th;
uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
uint32_t tmp;
- iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ iph = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv4_hdr *, m->l2_len);
th = RTE_PTR_ADD(iph, m->l3_len);
if ((iph->version_ihl >> 4) == 4) {
iph->hdr_checksum = 0;
ip_len = iph->total_length;
ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
m->l3_len);
} else {
- ip6h = (struct ipv6_hdr *)iph;
+ ip6h = (struct rte_ipv6_hdr *)iph;
ip_paylen = ip6h->payload_len;
}
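For illustration only (the numbers below are made up, not taken from the patch): with a plain 20-byte IPv4 header whose total_length is 1500, the branch above leaves ip_paylen holding the L4 length (TCP header plus payload) in network byte order, the same quantity the IPv6 branch reads directly from payload_len. A minimal sketch, assuming <rte_ip.h> and <rte_byteorder.h> are included:

	/* Hypothetical values, for illustration only. */
	struct rte_ipv4_hdr ex = {
		.version_ihl = 0x45,                    /* IPv4, 20-byte header */
		.total_length = rte_cpu_to_be_16(1500),
	};
	uint16_t l3_len = 20;
	uint16_t ip_paylen = rte_cpu_to_be_16(
		rte_be_to_cpu_16(ex.total_length) - l3_len);
	/* ip_paylen now equals rte_cpu_to_be_16(1480) */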
switch (cookie->ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct udp_hdr,
+ hdr->csum_offset = offsetof(struct rte_udp_hdr,
dgram_cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
case PKT_TX_TCP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+ hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
/* TCP Segmentation Offload */
if (cookie->ol_flags & PKT_TX_TCP_SEG) {
- virtio_tso_fix_cksum(cookie);
hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
VIRTIO_NET_HDR_GSO_TCPV6 :
VIRTIO_NET_HDR_GSO_TCPV4;
rxvq = &vq->rxq;
rxvq->queue_id = queue_idx;
rxvq->mpool = mp;
- if (rxvq->mpool == NULL) {
- rte_exit(EXIT_FAILURE,
- "Cannot allocate mbufs for rx virtqueue");
- }
-
dev->data->rx_queues[queue_idx] = rxvq;
return 0;
virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
{
uint32_t s = mbuf->pkt_len;
- struct ether_addr *ea;
+ struct rte_ether_addr *ea;
stats->bytes += s;
stats->size_bins[7]++;
}
- ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
- if (is_multicast_ether_addr(ea)) {
- if (is_broadcast_ether_addr(ea))
+ ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
stats->broadcast++;
else
stats->multicast++;
PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
- struct rte_mbuf *prev;
+ struct rte_mbuf *prev = NULL;
uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf_inorder(vq, rxm);
rxm->data_len = (uint16_t)(len[i]);
rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
- rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
- if (prev)
- prev->next = rxm;
+ prev->next = rxm;
prev = rxm;
seg_res -= 1;
}
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- prev = rcv_pkts[nb_rx];
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
virtio_rmb(hw->weak_barriers);
num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
prev->next = rxm;
prev = rxm;
rx_pkts[nb_rx]->pkt_len += len[extra_idx];
- rx_pkts[nb_rx]->data_len += len[extra_idx];
extra_idx += 1;
};
seg_res -= rcv_cnt;
} else {
PMD_RX_LOG(ERR,
"No enough segments for packet.");
- virtio_discard_rxbuf_inorder(vq, prev);
+ rte_pktmbuf_free(rx_pkts[nb_rx]);
rxvq->stats.errors++;
break;
}
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
- struct rte_mbuf *prev;
+ struct rte_mbuf *prev = NULL;
uint16_t nb_used, num, nb_rx = 0;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxm->data_len = (uint16_t)(len[i]);
rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
- rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
- if (prev)
- prev->next = rxm;
+ prev->next = rxm;
prev = rxm;
seg_res -= 1;
}
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- prev = rcv_pkts[nb_rx];
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
virtio_rmb(hw->weak_barriers);
num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
prev->next = rxm;
prev = rxm;
rx_pkts[nb_rx]->pkt_len += len[extra_idx];
- rx_pkts[nb_rx]->data_len += len[extra_idx];
extra_idx += 1;
};
seg_res -= rcv_cnt;
} else {
PMD_RX_LOG(ERR,
"No enough segments for packet.");
- virtio_discard_rxbuf(vq, prev);
+ rte_pktmbuf_free(rx_pkts[nb_rx]);
rxvq->stats.errors++;
break;
}
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxm->data_len = (uint16_t)(len[i]);
rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
- rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
- if (prev)
- prev->next = rxm;
+ prev->next = rxm;
prev = rxm;
seg_res -= 1;
}
while (seg_res != 0) {
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- if (likely(vq->vq_free_cnt >= rcv_cnt)) {
- num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
- len, rcv_cnt);
- uint16_t extra_idx = 0;
- rcv_cnt = num;
- while (extra_idx < rcv_cnt) {
- rxm = rcv_pkts[extra_idx];
- rxm->data_off =
- RTE_PKTMBUF_HEADROOM - hdr_size;
- rxm->pkt_len = (uint32_t)(len[extra_idx]);
- rxm->data_len = (uint16_t)(len[extra_idx]);
- prev->next = rxm;
- prev = rxm;
- rx_pkts[nb_rx]->pkt_len += len[extra_idx];
- rx_pkts[nb_rx]->data_len += len[extra_idx];
- extra_idx += 1;
- }
- seg_res -= rcv_cnt;
- if (!seg_res) {
- virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
- nb_rx++;
- }
- } else {
- PMD_RX_LOG(ERR,
- "No enough segments for packet.");
- if (prev)
- virtio_discard_rxbuf(vq, prev);
- rxvq->stats.errors++;
- break;
+ uint16_t extra_idx = 0;
+ rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
+ len, rcv_cnt);
+ if (unlikely(rcv_cnt == 0)) {
+ PMD_RX_LOG(ERR, "No enough segments for packet.");
+ rte_pktmbuf_free(rx_pkts[nb_rx]);
+ rxvq->stats.errors++;
+ break;
+ }
+ while (extra_idx < rcv_cnt) {
+ rxm = rcv_pkts[extra_idx];
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
+ prev->next = rxm;
+ prev = rxm;
+ rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+ extra_idx += 1;
+ }
+ seg_res -= rcv_cnt;
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
}
}
return nb_rx;
}
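The merge loops above follow the usual DPDK chained-mbuf convention: the head mbuf's pkt_len covers the whole packet while each segment's data_len covers only its own buffer, which is why only pkt_len is accumulated on rx_pkts[nb_rx]. A minimal sketch of that invariant (the helper name and the RTE_ASSERT checks are illustrative, not part of the driver):

	#include <rte_mbuf.h>
	#include <rte_debug.h>

	/* Walk a chained mbuf and check the head's totals (illustrative only). */
	static void
	check_mbuf_chain(const struct rte_mbuf *head)
	{
		const struct rte_mbuf *seg;
		uint32_t total = 0;
		uint16_t nb_segs = 0;

		for (seg = head; seg != NULL; seg = seg->next) {
			total += seg->data_len;
			nb_segs++;
		}
		RTE_ASSERT(total == head->pkt_len);
		RTE_ASSERT(nb_segs == head->nb_segs);
	}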
+uint16_t
+virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+ int error;
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *m = tx_pkts[nb_tx];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ error = rte_validate_tx_offload(m);
+ if (unlikely(error)) {
+ rte_errno = -error;
+ break;
+ }
+#endif
+
+ /* Do VLAN tag insertion */
+ if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&m);
+ /* rte_vlan_insert() may change pointer
+ * even in the case of failure
+ */
+ tx_pkts[nb_tx] = m;
+
+ if (unlikely(error)) {
+ rte_errno = -error;
+ break;
+ }
+ }
+
+ error = rte_net_intel_cksum_prepare(m);
+ if (unlikely(error)) {
+ rte_errno = -error;
+ break;
+ }
+
+ if (m->ol_flags & PKT_TX_TCP_SEG)
+ virtio_tso_fix_cksum(m);
+ }
+
+ return nb_tx;
+}
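The new callback is meant to be registered as the device's tx_pkt_prepare handler, so applications run it through rte_eth_tx_prepare() before the actual burst. A minimal caller-side sketch (port_id, queue_id and the helper name are placeholders, not taken from this patch):

	#include <rte_ethdev.h>
	#include <rte_errno.h>
	#include <rte_log.h>

	/* Illustrative only: prepare offloads, then transmit what was accepted. */
	static uint16_t
	send_burst(uint16_t port_id, uint16_t queue_id,
		   struct rte_mbuf **pkts, uint16_t nb_pkts)
	{
		uint16_t nb_prep;

		/* Invokes the PMD's tx_pkt_prepare (virtio_xmit_pkts_prepare here):
		 * offload validation, software VLAN insertion, checksum fix-ups.
		 */
		nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
		if (nb_prep < nb_pkts)
			/* pkts[nb_prep] was rejected; rte_errno tells why. */
			RTE_LOG(WARNING, PMD, "tx_prepare: %s\n",
				rte_strerror(rte_errno));

		return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
	}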
+
uint16_t
virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_tx = 0;
bool in_order = hw->use_inorder_tx;
- int error;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
struct rte_mbuf *txm = tx_pkts[nb_tx];
int can_push = 0, slots, need;
- /* Do VLAN tag insertion */
- if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
- error = rte_vlan_insert(&txm);
- if (unlikely(error)) {
- rte_pktmbuf_free(txm);
- continue;
- }
- }
-
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_used, nb_tx = 0;
- int error;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
struct rte_mbuf *txm = tx_pkts[nb_tx];
int can_push = 0, use_indirect = 0, slots, need;
- /* Do VLAN tag insertion */
- if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
- error = rte_vlan_insert(&txm);
- if (unlikely(error)) {
- rte_pktmbuf_free(txm);
- continue;
- }
- }
-
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
struct rte_mbuf *inorder_pkts[nb_pkts];
- int error;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
struct rte_mbuf *txm = tx_pkts[nb_tx];
int slots, need;
- /* Do VLAN tag insertion */
- if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
- error = rte_vlan_insert(&txm);
- if (unlikely(error)) {
- rte_pktmbuf_free(txm);
- continue;
- }
- }
-
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&