diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 1de2854..27ead19 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -559,7 +559,6 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
 
                /* TCP Segmentation Offload */
                if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-                       virtio_tso_fix_cksum(cookie);
                        hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
                                VIRTIO_NET_HDR_GSO_TCPV6 :
                                VIRTIO_NET_HDR_GSO_TCPV4;
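
Note: virtio_tso_fix_cksum() is moved, not dropped. The TSO pseudo-header
checksum fixup now runs once per packet in the new virtio_xmit_pkts_prepare()
added further down, instead of inside virtqueue_xmit_offload() on the
enqueue path.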
@@ -890,11 +889,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxvq = &vq->rxq;
        rxvq->queue_id = queue_idx;
        rxvq->mpool = mp;
-       if (rxvq->mpool == NULL) {
-               rte_exit(EXIT_FAILURE,
-                       "Cannot allocate mbufs for rx virtqueue");
-       }
-
        dev->data->rx_queues[queue_idx] = rxvq;
 
        return 0;
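
Calling rte_exit() from a PMD on behalf of the application is wrong, and the
mempool pointer is expected to be validated by the ethdev layer before the
driver callback runs, so the check is redundant. If a defensive check were
kept, the PMD-appropriate form (a hypothetical alternative, not part of this
commit) would return an error instead of terminating the process:

	if (unlikely(mp == NULL)) {
		PMD_INIT_LOG(ERR, "Cannot allocate mbufs for rx virtqueue");
		return -EINVAL;
	}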
@@ -1424,7 +1418,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
        struct virtqueue *vq = rxvq->vq;
        struct virtio_hw *hw = vq->hw;
        struct rte_mbuf *rxm;
-       struct rte_mbuf *prev;
+       struct rte_mbuf *prev = NULL;
        uint16_t nb_used, num, nb_rx;
        uint32_t len[VIRTIO_MBUF_BURST_SZ];
        struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
@@ -1516,11 +1510,8 @@ virtio_recv_pkts_inorder(void *rx_queue,
                        rxm->data_len = (uint16_t)(len[i]);
 
                        rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
-                       rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
-                       if (prev)
-                               prev->next = rxm;
 
+                       prev->next = rxm;
                        prev = rxm;
                        seg_res -= 1;
                }
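
Two fixes in one hunk. First, rx_pkts[nb_rx]->data_len must not grow with
each segment: in an rte_mbuf chain, data_len is the length of one segment,
while pkt_len on the first mbuf is the total for the whole chain. Second,
since prev is now initialized and always points at the tail of the chain
being assembled, the NULL guard goes away. The invariant, as a minimal
sketch with hypothetical head/seg names:

	head->pkt_len += seg->data_len;	/* total accumulates on the head only */
	/* head->data_len stays untouched: it covers the first segment only */
	prev->next = seg;		/* prev always tracks the current tail */
	prev = seg;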
@@ -1536,7 +1527,6 @@ virtio_recv_pkts_inorder(void *rx_queue,
                uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
                                        VIRTIO_MBUF_BURST_SZ);
 
-               prev = rcv_pkts[nb_rx];
                if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
                        virtio_rmb(hw->weak_barriers);
                        num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
@@ -1553,7 +1543,6 @@ virtio_recv_pkts_inorder(void *rx_queue,
                                prev->next = rxm;
                                prev = rxm;
                                rx_pkts[nb_rx]->pkt_len += len[extra_idx];
-                               rx_pkts[nb_rx]->data_len += len[extra_idx];
                                extra_idx += 1;
                        };
                        seg_res -= rcv_cnt;
@@ -1565,7 +1554,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
                } else {
                        PMD_RX_LOG(ERR,
                                        "No enough segments for packet.");
-                       virtio_discard_rxbuf_inorder(vq, prev);
+                       rte_pktmbuf_free(rx_pkts[nb_rx]);
                        rxvq->stats.errors++;
                        break;
                }
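
Instead of recycling the last segment back into the ring via
virtio_discard_rxbuf_inorder(), the error path now frees the whole partially
assembled chain: rte_pktmbuf_free() walks the next pointers and releases
every segment to its mempool. The same substitution is applied to the
mergeable split-ring and packed-ring paths below.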
@@ -1616,7 +1605,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
        struct virtqueue *vq = rxvq->vq;
        struct virtio_hw *hw = vq->hw;
        struct rte_mbuf *rxm;
-       struct rte_mbuf *prev;
+       struct rte_mbuf *prev = NULL;
        uint16_t nb_used, num, nb_rx = 0;
        uint32_t len[VIRTIO_MBUF_BURST_SZ];
        struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
@@ -1703,11 +1692,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,
                        rxm->data_len = (uint16_t)(len[i]);
 
                        rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
-                       rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
-                       if (prev)
-                               prev->next = rxm;
 
+                       prev->next = rxm;
                        prev = rxm;
                        seg_res -= 1;
                }
@@ -1723,7 +1709,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
                uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
                                        VIRTIO_MBUF_BURST_SZ);
 
-               prev = rcv_pkts[nb_rx];
                if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
                        virtio_rmb(hw->weak_barriers);
                        num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
@@ -1740,7 +1725,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
                                prev->next = rxm;
                                prev = rxm;
                                rx_pkts[nb_rx]->pkt_len += len[extra_idx];
-                               rx_pkts[nb_rx]->data_len += len[extra_idx];
                                extra_idx += 1;
                        };
                        seg_res -= rcv_cnt;
@@ -1752,7 +1736,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
                } else {
                        PMD_RX_LOG(ERR,
                                        "No enough segments for packet.");
-                       virtio_discard_rxbuf(vq, prev);
+                       rte_pktmbuf_free(rx_pkts[nb_rx]);
                        rxvq->stats.errors++;
                        break;
                }
@@ -1881,11 +1865,8 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
                        rxm->data_len = (uint16_t)(len[i]);
 
                        rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
-                       rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
-
-                       if (prev)
-                               prev->next = rxm;
 
+                       prev->next = rxm;
                        prev = rxm;
                        seg_res -= 1;
                }
@@ -1900,39 +1881,33 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
        while (seg_res != 0) {
                uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
                                        VIRTIO_MBUF_BURST_SZ);
-               if (likely(vq->vq_free_cnt >= rcv_cnt)) {
-                       num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
-                                       len, rcv_cnt);
-                       uint16_t extra_idx = 0;
+               uint16_t extra_idx = 0;
 
-                       rcv_cnt = num;
+               rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
+                               len, rcv_cnt);
+               if (unlikely(rcv_cnt == 0)) {
+                       PMD_RX_LOG(ERR, "No enough segments for packet.");
+                       rte_pktmbuf_free(rx_pkts[nb_rx]);
+                       rxvq->stats.errors++;
+                       break;
+               }
 
-                       while (extra_idx < rcv_cnt) {
-                               rxm = rcv_pkts[extra_idx];
+               while (extra_idx < rcv_cnt) {
+                       rxm = rcv_pkts[extra_idx];
 
-                               rxm->data_off =
-                                       RTE_PKTMBUF_HEADROOM - hdr_size;
-                               rxm->pkt_len = (uint32_t)(len[extra_idx]);
-                               rxm->data_len = (uint16_t)(len[extra_idx]);
+                       rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+                       rxm->pkt_len = (uint32_t)(len[extra_idx]);
+                       rxm->data_len = (uint16_t)(len[extra_idx]);
 
-                               prev->next = rxm;
-                               prev = rxm;
-                               rx_pkts[nb_rx]->pkt_len += len[extra_idx];
-                               rx_pkts[nb_rx]->data_len += len[extra_idx];
-                               extra_idx += 1;
-                       }
-                       seg_res -= rcv_cnt;
-                       if (!seg_res) {
-                               virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
-                               nb_rx++;
-                       }
-               } else {
-                       PMD_RX_LOG(ERR,
-                                       "No enough segments for packet.");
-                       if (prev)
-                               virtio_discard_rxbuf(vq, prev);
-                       rxvq->stats.errors++;
-                       break;
+                       prev->next = rxm;
+                       prev = rxm;
+                       rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+                       extra_idx += 1;
+               }
+               seg_res -= rcv_cnt;
+               if (!seg_res) {
+                       virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+                       nb_rx++;
                }
        }
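
The packed-ring version gets a larger rework of the same fix: the dequeue is
hoisted out of the vq_free_cnt precondition, failure is now detected on an
empty dequeue (rcv_cnt == 0), the error path frees the whole chain with
rte_pktmbuf_free(rx_pkts[nb_rx]) like the split-ring paths above, and the
per-segment data_len accumulation on the head mbuf is dropped here as well.
The surviving loop body is unchanged apart from losing one level of
indentation.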
 
@@ -1969,6 +1944,51 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
        return nb_rx;
 }
 
+uint16_t
+virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+                       uint16_t nb_pkts)
+{
+       uint16_t nb_tx;
+       int error;
+
+       for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+               struct rte_mbuf *m = tx_pkts[nb_tx];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+               error = rte_validate_tx_offload(m);
+               if (unlikely(error)) {
+                       rte_errno = -error;
+                       break;
+               }
+#endif
+
+               /* Do VLAN tag insertion */
+               if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+                       error = rte_vlan_insert(&m);
+                       /* rte_vlan_insert() may change pointer
+                        * even in the case of failure
+                        */
+                       tx_pkts[nb_tx] = m;
+
+                       if (unlikely(error)) {
+                               rte_errno = -error;
+                               break;
+                       }
+               }
+
+               error = rte_net_intel_cksum_prepare(m);
+               if (unlikely(error)) {
+                       rte_errno = -error;
+                       break;
+               }
+
+               if (m->ol_flags & PKT_TX_TCP_SEG)
+                       virtio_tso_fix_cksum(m);
+       }
+
+       return nb_tx;
+}
+
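
virtio_xmit_pkts_prepare() collects everything that used to be scattered
across the burst functions: optional offload validation under
RTE_LIBRTE_ETHDEV_DEBUG, VLAN tag insertion, checksum preparation via
rte_net_intel_cksum_prepare(), and the TSO checksum fixup removed from
virtqueue_xmit_offload() above. On failure it sets rte_errno and returns the
index of the first bad packet. Assuming it is wired up as the device's
tx_pkt_prepare callback (the wiring is not part of this hunk), a
caller-side sketch looks like:

	/* hypothetical application-side usage */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (unlikely(nb_prep != nb_pkts)) {
		/* pkts[nb_prep] failed to prepare; rte_errno says why */
		rte_pktmbuf_free(pkts[nb_prep]);
	}
	uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);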
 uint16_t
 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts)
@@ -1979,7 +1999,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_tx = 0;
        bool in_order = hw->use_inorder_tx;
-       int error;
 
        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;
@@ -1997,17 +2016,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                int can_push = 0, slots, need;
 
-               /* Do VLAN tag insertion */
-               if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-                       error = rte_vlan_insert(&txm);
-                       if (unlikely(error)) {
-                               rte_pktmbuf_free(txm);
-                               continue;
-                       }
-                       /* vlan_insert may add a header mbuf */
-                       tx_pkts[nb_tx] = txm;
-               }
-
                /* optimize ring usage */
                if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
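
With the prepare stage in place, per-packet VLAN insertion disappears from
all three burst functions (this hunk plus the identical removals in
virtio_xmit_pkts() and virtio_xmit_pkts_inorder() below), along with the
now-unused 'error' locals. Note the behavior change: the burst path used to
silently free a packet when rte_vlan_insert() failed and carry on, whereas
the prepare stage stops, sets rte_errno, and leaves the packet to the
application.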
@@ -2067,7 +2075,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        struct virtio_hw *hw = vq->hw;
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_used, nb_tx = 0;
-       int error;
 
        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;
@@ -2086,17 +2093,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                int can_push = 0, use_indirect = 0, slots, need;
 
-               /* Do VLAN tag insertion */
-               if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-                       error = rte_vlan_insert(&txm);
-                       if (unlikely(error)) {
-                               rte_pktmbuf_free(txm);
-                               continue;
-                       }
-                       /* vlan_insert may add a header mbuf */
-                       tx_pkts[nb_tx] = txm;
-               }
-
                /* optimize ring usage */
                if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
@@ -2166,7 +2162,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
        struct rte_mbuf *inorder_pkts[nb_pkts];
-       int error;
 
        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;
@@ -2191,17 +2186,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                int slots, need;
 
-               /* Do VLAN tag insertion */
-               if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-                       error = rte_vlan_insert(&txm);
-                       if (unlikely(error)) {
-                               rte_pktmbuf_free(txm);
-                               continue;
-                       }
-                       /* vlan_insert may add a header mbuf */
-                       tx_pkts[nb_tx] = txm;
-               }
-
                /* optimize ring usage */
                if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&