net/mlx5: fix Tx stats error counter logic
authorShahaf Shuler <shahafs@mellanox.com>
Thu, 14 Sep 2017 10:50:38 +0000 (13:50 +0300)
committerFerruh Yigit <ferruh.yigit@intel.com>
Fri, 6 Oct 2017 00:49:48 +0000 (02:49 +0200)
The Tx error counter is never incremented anywhere in the transmit
path, making it useless for applications.

Fixes: 87011737b715 ("mlx5: add software counters")
Cc: stable@dpdk.org
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
drivers/net/mlx5/mlx5_rxtx.c

index f89fa40..3e6ef8b 100644 (file)
@@ -406,8 +406,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 #ifdef MLX5_PMD_SOFT_COUNTERS
                total_length = length;
 #endif
-               if (length < (MLX5_WQE_DWORD_SIZE + 2))
+               if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
+                       txq->stats.oerrors++;
                        break;
+               }
                /* Update element. */
                (*txq->elts)[elts_head & elts_m] = buf;
                /* Prefetch next buffer data. */
@@ -482,8 +484,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                        cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                                }
                                if (unlikely(tso_header_sz >
-                                            MLX5_MAX_TSO_HEADER))
+                                            MLX5_MAX_TSO_HEADER)) {
+                                       txq->stats.oerrors++;
                                        break;
+                               }
                                copy_b = tso_header_sz - pkt_inline_sz;
                                /* First seg must contain all headers. */
                                assert(copy_b <= length);
@@ -836,8 +840,10 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                if (max_elts < segs_n)
                        break;
                /* Do not bother with large packets MPW cannot handle. */
-               if (segs_n > MLX5_MPW_DSEG_MAX)
+               if (segs_n > MLX5_MPW_DSEG_MAX) {
+                       txq->stats.oerrors++;
                        break;
+               }
                max_elts -= segs_n;
                --pkts_n;
                /* Should we enable HW CKSUM offload */
@@ -1057,8 +1063,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                if (max_elts < segs_n)
                        break;
                /* Do not bother with large packets MPW cannot handle. */
-               if (segs_n > MLX5_MPW_DSEG_MAX)
+               if (segs_n > MLX5_MPW_DSEG_MAX) {
+                       txq->stats.oerrors++;
                        break;
+               }
                max_elts -= segs_n;
                --pkts_n;
                /*
@@ -1346,8 +1354,10 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                if (max_elts - j < segs_n)
                        break;
                /* Do not bother with large packets MPW cannot handle. */
-               if (segs_n > MLX5_MPW_DSEG_MAX)
+               if (segs_n > MLX5_MPW_DSEG_MAX) {
+                       txq->stats.oerrors++;
                        break;
+               }
                /* Should we enable HW CKSUM offload. */
                if (buf->ol_flags &
                    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))