X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtio_rxtx.c;h=27ead19fbe81d12b96186698d38933376b79e890;hb=e876497c4f6baef09fbee14b702025b67f4fed1c;hp=3805e217a602953f852dec05a900d50b84c37228;hpb=35b2d13fd6fdcbd191f2a30d74648faeb1186c65;p=dpdk.git

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 3805e217a6..27ead19fbe 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -479,13 +479,14 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
 	/* common case: header is not fragmented */
 	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
 			m->l4_len)) {
-		struct ipv4_hdr *iph;
-		struct ipv6_hdr *ip6h;
-		struct tcp_hdr *th;
+		struct rte_ipv4_hdr *iph;
+		struct rte_ipv6_hdr *ip6h;
+		struct rte_tcp_hdr *th;
 		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
 		uint32_t tmp;
 
-		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+		iph = rte_pktmbuf_mtod_offset(m,
+					struct rte_ipv4_hdr *, m->l2_len);
 		th = RTE_PTR_ADD(iph, m->l3_len);
 		if ((iph->version_ihl >> 4) == 4) {
 			iph->hdr_checksum = 0;
@@ -494,7 +495,7 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
 			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
 				m->l3_len);
 		} else {
-			ip6h = (struct ipv6_hdr *)iph;
+			ip6h = (struct rte_ipv6_hdr *)iph;
 			ip_paylen = ip6h->payload_len;
 		}
 
@@ -538,14 +539,14 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
 		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
 		case PKT_TX_UDP_CKSUM:
 			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct udp_hdr,
+			hdr->csum_offset = offsetof(struct rte_udp_hdr,
 				dgram_cksum);
 			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 			break;
 
 		case PKT_TX_TCP_CKSUM:
 			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+			hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
 			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 			break;
 
@@ -558,7 +559,6 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
 
 	/* TCP Segmentation Offload */
 	if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-		virtio_tso_fix_cksum(cookie);
 		hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
 			VIRTIO_NET_HDR_GSO_TCPV6 :
 			VIRTIO_NET_HDR_GSO_TCPV4;
@@ -889,11 +889,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxvq = &vq->rxq;
 	rxvq->queue_id = queue_idx;
 	rxvq->mpool = mp;
-	if (rxvq->mpool == NULL) {
-		rte_exit(EXIT_FAILURE,
-			"Cannot allocate mbufs for rx virtqueue");
-	}
-
 	dev->data->rx_queues[queue_idx] = rxvq;
 
 	return 0;
@@ -1423,7 +1418,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
 	struct virtqueue *vq = rxvq->vq;
 	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm;
-	struct rte_mbuf *prev;
+	struct rte_mbuf *prev = NULL;
 	uint16_t nb_used, num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
@@ -1515,11 +1510,8 @@ virtio_recv_pkts_inorder(void *rx_queue,
 		rxm->data_len = (uint16_t)(len[i]);
 
 		rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
-		rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
-		if (prev)
-			prev->next = rxm;
 
+		prev->next = rxm;
 		prev = rxm;
 		seg_res -= 1;
 	}
@@ -1535,7 +1527,6 @@ virtio_recv_pkts_inorder(void *rx_queue,
 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
 					VIRTIO_MBUF_BURST_SZ);
 
-		prev = rcv_pkts[nb_rx];
 		if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
 			virtio_rmb(hw->weak_barriers);
 			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
@@ -1552,7 +1543,6 @@ virtio_recv_pkts_inorder(void *rx_queue,
 				prev->next = rxm;
 				prev = rxm;
 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
-				rx_pkts[nb_rx]->data_len += len[extra_idx];
 				extra_idx += 1;
 			};
 			seg_res -= rcv_cnt;
@@ -1564,7 +1554,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
 		} else {
 			PMD_RX_LOG(ERR,
 					"No enough segments for packet.");
-			virtio_discard_rxbuf_inorder(vq, prev);
+			rte_pktmbuf_free(rx_pkts[nb_rx]);
 			rxvq->stats.errors++;
 			break;
 		}
@@ -1615,7 +1605,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 	struct virtqueue *vq = rxvq->vq;
 	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm;
-	struct rte_mbuf *prev;
+	struct rte_mbuf *prev = NULL;
 	uint16_t nb_used, num, nb_rx = 0;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
@@ -1702,11 +1692,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 		rxm->data_len = (uint16_t)(len[i]);
 
 		rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
-		rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
-		if (prev)
-			prev->next = rxm;
 
+		prev->next = rxm;
 		prev = rxm;
 		seg_res -= 1;
 	}
@@ -1722,7 +1709,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
 					VIRTIO_MBUF_BURST_SZ);
 
-		prev = rcv_pkts[nb_rx];
 		if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
 			virtio_rmb(hw->weak_barriers);
 			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
@@ -1739,7 +1725,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 				prev->next = rxm;
 				prev = rxm;
 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
-				rx_pkts[nb_rx]->data_len += len[extra_idx];
 				extra_idx += 1;
 			};
 			seg_res -= rcv_cnt;
@@ -1751,7 +1736,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 		} else {
 			PMD_RX_LOG(ERR,
 					"No enough segments for packet.");
-			virtio_discard_rxbuf(vq, prev);
+			rte_pktmbuf_free(rx_pkts[nb_rx]);
 			rxvq->stats.errors++;
 			break;
 		}
@@ -1880,11 +1865,8 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
 		rxm->data_len = (uint16_t)(len[i]);
 
 		rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
-		rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
-		if (prev)
-			prev->next = rxm;
 
+		prev->next = rxm;
 		prev = rxm;
 		seg_res -= 1;
 	}
@@ -1899,39 +1881,33 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
 	while (seg_res != 0) {
 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
 				VIRTIO_MBUF_BURST_SZ);
-		if (likely(vq->vq_free_cnt >= rcv_cnt)) {
-			num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
-					len, rcv_cnt);
-			uint16_t extra_idx = 0;
+		uint16_t extra_idx = 0;
 
-			rcv_cnt = num;
+		rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
+				len, rcv_cnt);
+		if (unlikely(rcv_cnt == 0)) {
+			PMD_RX_LOG(ERR, "No enough segments for packet.");
+			rte_pktmbuf_free(rx_pkts[nb_rx]);
+			rxvq->stats.errors++;
+			break;
+		}
 
-			while (extra_idx < rcv_cnt) {
-				rxm = rcv_pkts[extra_idx];
+		while (extra_idx < rcv_cnt) {
+			rxm = rcv_pkts[extra_idx];
 
-				rxm->data_off =
-					RTE_PKTMBUF_HEADROOM - hdr_size;
-				rxm->pkt_len = (uint32_t)(len[extra_idx]);
-				rxm->data_len = (uint16_t)(len[extra_idx]);
+			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+			rxm->pkt_len = (uint32_t)(len[extra_idx]);
+			rxm->data_len = (uint16_t)(len[extra_idx]);
 
-				prev->next = rxm;
-				prev = rxm;
-				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
-				rx_pkts[nb_rx]->data_len += len[extra_idx];
-				extra_idx += 1;
-			}
-			seg_res -= rcv_cnt;
-			if (!seg_res) {
-				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
-				nb_rx++;
-			}
-		} else {
-			PMD_RX_LOG(ERR,
-					"No enough segments for packet.");
-			if (prev)
-				virtio_discard_rxbuf(vq, prev);
-			rxvq->stats.errors++;
-			break;
+			prev->next = rxm;
+			prev = rxm;
+			rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+			extra_idx += 1;
+		}
+		seg_res -= rcv_cnt;
+		if (!seg_res) {
+			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+			nb_rx++;
 		}
 	}
 
@@ -1968,6 +1944,51 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
 	return nb_rx;
 }
 
+uint16_t
+virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	uint16_t nb_tx;
+	int error;
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		struct rte_mbuf *m = tx_pkts[nb_tx];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		error = rte_validate_tx_offload(m);
+		if (unlikely(error)) {
+			rte_errno = -error;
+			break;
+		}
+#endif
+
+		/* Do VLAN tag insertion */
+		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+			error = rte_vlan_insert(&m);
+			/* rte_vlan_insert() may change pointer
+			 * even in the case of failure
+			 */
+			tx_pkts[nb_tx] = m;
+
+			if (unlikely(error)) {
+				rte_errno = -error;
+				break;
+			}
+		}
+
+		error = rte_net_intel_cksum_prepare(m);
+		if (unlikely(error)) {
+			rte_errno = -error;
+			break;
+		}
+
+		if (m->ol_flags & PKT_TX_TCP_SEG)
+			virtio_tso_fix_cksum(m);
+	}
+
+	return nb_tx;
+}
+
 uint16_t
 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts)
@@ -1978,7 +1999,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_tx = 0;
 	bool in_order = hw->use_inorder_tx;
-	int error;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -1996,17 +2016,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
 		int can_push = 0, slots, need;
 
-		/* Do VLAN tag insertion */
-		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-			error = rte_vlan_insert(&txm);
-			if (unlikely(error)) {
-				rte_pktmbuf_free(txm);
-				continue;
-			}
-			/* vlan_insert may add a header mbuf */
-			tx_pkts[nb_tx] = txm;
-		}
-
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
@@ -2066,7 +2075,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_used, nb_tx = 0;
-	int error;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -2085,17 +2093,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
 		int can_push = 0, use_indirect = 0, slots, need;
 
-		/* Do VLAN tag insertion */
-		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-			error = rte_vlan_insert(&txm);
-			if (unlikely(error)) {
-				rte_pktmbuf_free(txm);
-				continue;
-			}
-			/* vlan_insert may add a header mbuf */
-			tx_pkts[nb_tx] = txm;
-		}
-
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
@@ -2165,7 +2162,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
 	struct rte_mbuf *inorder_pkts[nb_pkts];
-	int error;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -2190,17 +2186,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
 		int slots, need;
 
-		/* Do VLAN tag insertion */
-		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-			error = rte_vlan_insert(&txm);
-			if (unlikely(error)) {
-				rte_pktmbuf_free(txm);
-				continue;
-			}
-			/* vlan_insert may add a header mbuf */
-			tx_pkts[nb_tx] = txm;
-		}
-
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&