net/virtio: move Tx offload fix-ups into a Tx prepare callback [NOTE(review): original page title read "net/ice: fix VXLAN/NVGRE flow matching", which does not match this virtio_rxtx.c diff — likely a gitweb extraction artifact; confirm the real commit subject against dpdk.git history]
[dpdk.git] / drivers / net / virtio / virtio_rxtx.c
index 8dccd69..27ead19 100644 (file)
@@ -559,7 +559,6 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
 
                /* TCP Segmentation Offload */
                if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-                       virtio_tso_fix_cksum(cookie);
                        hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
                                VIRTIO_NET_HDR_GSO_TCPV6 :
                                VIRTIO_NET_HDR_GSO_TCPV4;
@@ -1512,9 +1511,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
 
                        rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
 
-                       if (prev)
-                               prev->next = rxm;
-
+                       prev->next = rxm;
                        prev = rxm;
                        seg_res -= 1;
                }
@@ -1557,7 +1554,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
                } else {
                        PMD_RX_LOG(ERR,
                                        "No enough segments for packet.");
-                       virtio_discard_rxbuf_inorder(vq, prev);
+                       rte_pktmbuf_free(rx_pkts[nb_rx]);
                        rxvq->stats.errors++;
                        break;
                }
@@ -1696,9 +1693,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 
                        rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
 
-                       if (prev)
-                               prev->next = rxm;
-
+                       prev->next = rxm;
                        prev = rxm;
                        seg_res -= 1;
                }
@@ -1741,7 +1736,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
                } else {
                        PMD_RX_LOG(ERR,
                                        "No enough segments for packet.");
-                       virtio_discard_rxbuf(vq, prev);
+                       rte_pktmbuf_free(rx_pkts[nb_rx]);
                        rxvq->stats.errors++;
                        break;
                }
@@ -1871,9 +1866,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
 
                        rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
 
-                       if (prev)
-                               prev->next = rxm;
-
+                       prev->next = rxm;
                        prev = rxm;
                        seg_res -= 1;
                }
@@ -1888,38 +1881,33 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
        while (seg_res != 0) {
                uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
                                        VIRTIO_MBUF_BURST_SZ);
-               if (likely(vq->vq_free_cnt >= rcv_cnt)) {
-                       num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
-                                       len, rcv_cnt);
-                       uint16_t extra_idx = 0;
+               uint16_t extra_idx = 0;
 
-                       rcv_cnt = num;
+               rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
+                               len, rcv_cnt);
+               if (unlikely(rcv_cnt == 0)) {
+                       PMD_RX_LOG(ERR, "No enough segments for packet.");
+                       rte_pktmbuf_free(rx_pkts[nb_rx]);
+                       rxvq->stats.errors++;
+                       break;
+               }
 
-                       while (extra_idx < rcv_cnt) {
-                               rxm = rcv_pkts[extra_idx];
+               while (extra_idx < rcv_cnt) {
+                       rxm = rcv_pkts[extra_idx];
 
-                               rxm->data_off =
-                                       RTE_PKTMBUF_HEADROOM - hdr_size;
-                               rxm->pkt_len = (uint32_t)(len[extra_idx]);
-                               rxm->data_len = (uint16_t)(len[extra_idx]);
+                       rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+                       rxm->pkt_len = (uint32_t)(len[extra_idx]);
+                       rxm->data_len = (uint16_t)(len[extra_idx]);
 
-                               prev->next = rxm;
-                               prev = rxm;
-                               rx_pkts[nb_rx]->pkt_len += len[extra_idx];
-                               extra_idx += 1;
-                       }
-                       seg_res -= rcv_cnt;
-                       if (!seg_res) {
-                               virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
-                               nb_rx++;
-                       }
-               } else {
-                       PMD_RX_LOG(ERR,
-                                       "No enough segments for packet.");
-                       if (prev)
-                               virtio_discard_rxbuf(vq, prev);
-                       rxvq->stats.errors++;
-                       break;
+                       prev->next = rxm;
+                       prev = rxm;
+                       rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+                       extra_idx += 1;
+               }
+               seg_res -= rcv_cnt;
+               if (!seg_res) {
+                       virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+                       nb_rx++;
                }
        }
 
@@ -1956,6 +1944,51 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
        return nb_rx;
 }
 
+uint16_t
+virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+                       uint16_t nb_pkts)
+{
+       uint16_t nb_tx;
+       int error;
+
+       for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+               struct rte_mbuf *m = tx_pkts[nb_tx];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+               error = rte_validate_tx_offload(m);
+               if (unlikely(error)) {
+                       rte_errno = -error;
+                       break;
+               }
+#endif
+
+               /* Do VLAN tag insertion */
+               if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+                       error = rte_vlan_insert(&m);
+                       /* rte_vlan_insert() may change pointer
+                        * even in the case of failure
+                        */
+                       tx_pkts[nb_tx] = m;
+
+                       if (unlikely(error)) {
+                               rte_errno = -error;
+                               break;
+                       }
+               }
+
+               error = rte_net_intel_cksum_prepare(m);
+               if (unlikely(error)) {
+                       rte_errno = -error;
+                       break;
+               }
+
+               if (m->ol_flags & PKT_TX_TCP_SEG)
+                       virtio_tso_fix_cksum(m);
+       }
+
+       return nb_tx;
+}
+
 uint16_t
 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts)
@@ -1966,7 +1999,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_tx = 0;
        bool in_order = hw->use_inorder_tx;
-       int error;
 
        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;
@@ -1984,17 +2016,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                int can_push = 0, slots, need;
 
-               /* Do VLAN tag insertion */
-               if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-                       error = rte_vlan_insert(&txm);
-                       if (unlikely(error)) {
-                               rte_pktmbuf_free(txm);
-                               continue;
-                       }
-                       /* vlan_insert may add a header mbuf */
-                       tx_pkts[nb_tx] = txm;
-               }
-
                /* optimize ring usage */
                if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
@@ -2054,7 +2075,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        struct virtio_hw *hw = vq->hw;
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_used, nb_tx = 0;
-       int error;
 
        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;
@@ -2073,17 +2093,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                int can_push = 0, use_indirect = 0, slots, need;
 
-               /* Do VLAN tag insertion */
-               if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-                       error = rte_vlan_insert(&txm);
-                       if (unlikely(error)) {
-                               rte_pktmbuf_free(txm);
-                               continue;
-                       }
-                       /* vlan_insert may add a header mbuf */
-                       tx_pkts[nb_tx] = txm;
-               }
-
                /* optimize ring usage */
                if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
@@ -2153,7 +2162,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
        struct rte_mbuf *inorder_pkts[nb_pkts];
-       int error;
 
        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;
@@ -2178,17 +2186,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                int slots, need;
 
-               /* Do VLAN tag insertion */
-               if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-                       error = rte_vlan_insert(&txm);
-                       if (unlikely(error)) {
-                               rte_pktmbuf_free(txm);
-                               continue;
-                       }
-                       /* vlan_insert may add a header mbuf */
-                       tx_pkts[nb_tx] = txm;
-               }
-
                /* optimize ring usage */
                if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&