net/enic: fix TSO for packets greater than 9208 bytes
authorJohn Daley <johndale@cisco.com>
Thu, 2 Nov 2017 05:47:10 +0000 (22:47 -0700)
committerFerruh Yigit <ferruh.yigit@intel.com>
Thu, 2 Nov 2017 18:32:04 +0000 (19:32 +0100)
A check was previously added to drop Tx packets greater than what the NIC
is capable of sending since such packets can freeze the send queue. The
check did not account for TSO packets, however, so TSO was limited to 9208
bytes.

Check packet length only for non-TSO packets. Also ensure that TSO packet
segment size plus the headers do not exceed what the NIC is capable of,
since this also can freeze the send queue.

Use the PKT_TX_TCP_SEG ol_flag instead of m->tso_segsz which is the
preferred way to check for TSO.

Fixes: ed6e564c214e ("net/enic: fix memory leak with oversized Tx packets")
Cc: stable@dpdk.org
Signed-off-by: John Daley <johndale@cisco.com>
drivers/net/enic/enic_rxtx.c

index 74450ea..a3663d5 100644 (file)
@@ -546,12 +546,15 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint64_t bus_addr;
        uint8_t offload_mode;
        uint16_t header_len;
+       uint64_t tso;
+       rte_atomic64_t *tx_oversized;
 
        enic_cleanup_wq(enic, wq);
        wq_desc_avail = vnic_wq_desc_avail(wq);
        head_idx = wq->head_idx;
        desc_count = wq->ring.desc_count;
        ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
+       tx_oversized = &enic->soft_stats.tx_oversized;
 
        nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
 
@@ -561,10 +564,12 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                data_len = tx_pkt->data_len;
                ol_flags = tx_pkt->ol_flags;
                nb_segs = tx_pkt->nb_segs;
+               tso = ol_flags & PKT_TX_TCP_SEG;
 
-               if (pkt_len > ENIC_TX_MAX_PKT_SIZE) {
+               /* drop packet if it's too big to send */
+               if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
                        rte_pktmbuf_free(tx_pkt);
-                       rte_atomic64_inc(&enic->soft_stats.tx_oversized);
+                       rte_atomic64_inc(tx_oversized);
                        continue;
                }
 
@@ -587,13 +592,21 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
                header_len = 0;
 
-               if (tx_pkt->tso_segsz) {
+               if (tso) {
                        header_len = tso_header_len(tx_pkt);
-                       if (header_len) {
-                               offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
-                               mss = tx_pkt->tso_segsz;
+
+                       /* Drop if non-TCP packet or TSO seg size is too big */
+                       if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
+                           header_len) > ENIC_TX_MAX_PKT_SIZE))) {
+                               rte_pktmbuf_free(tx_pkt);
+                               rte_atomic64_inc(tx_oversized);
+                               continue;
                        }
+
+                       offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
+                       mss = tx_pkt->tso_segsz;
                }
+
                if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
                        if (ol_flags & PKT_TX_IP_CKSUM)
                                mss |= ENIC_CALC_IP_CKSUM;