diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index c6add0f..baea1bb 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -83,7 +83,7 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
 int
 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                    uint16_t nb_desc, unsigned int socket_id,
-                   const struct rte_eth_rxconf *rx_conf,
+                   __rte_unused const struct rte_eth_rxconf *rx_conf,
                    struct rte_mempool *mp)
 {
        struct qede_dev *qdev = dev->data->dev_private;
@@ -330,8 +330,8 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
 static void qede_init_fp(struct qede_dev *qdev)
 {
        struct qede_fastpath *fp;
-       uint8_t i, rss_id, tc;
-       int fp_rx = qdev->fp_num_rx, rxq = 0, txq = 0;
+       uint8_t i;
+       int fp_rx = qdev->fp_num_rx;
 
        memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
                                           sizeof(*qdev->fp_array)));
@@ -367,11 +367,9 @@ void qede_free_fp_arrays(struct qede_dev *qdev)
        }
 }
 
-int qede_alloc_fp_array(struct qede_dev *qdev)
+static int qede_alloc_fp_array(struct qede_dev *qdev)
 {
-       struct qede_fastpath *fp;
        struct ecore_dev *edev = &qdev->edev;
-       int i;
 
        qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
                                    sizeof(*qdev->fp_array),
@@ -477,7 +475,8 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
 }
 
 static inline void
-qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
+qede_update_rx_prod(__rte_unused struct qede_dev *edev,
+                   struct qede_rx_queue *rxq)
 {
        uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
        uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
@@ -514,14 +513,17 @@ qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
        /* Enable LRO in split mode */
        sge_tpa_params->tpa_ipv4_en_flg = enable;
        sge_tpa_params->tpa_ipv6_en_flg = enable;
-       sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
-       sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
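+       /* Keep TPA disabled for tunneled traffic */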
+       sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
+       sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
        /* set if tpa enable changes */
        sge_tpa_params->update_tpa_en_flg = 1;
        /* set if tpa parameters should be handled */
        sge_tpa_params->update_tpa_param_flg = enable;
 
        sge_tpa_params->max_buffers_per_cqe = 20;
+       /* Enable TPA in split mode. In this mode each TPA segment
+        * starts on a new BD, so there is one BD per segment.
+        */
        sge_tpa_params->tpa_pkt_split_flg = 1;
        sge_tpa_params->tpa_hdr_data_split_flg = 0;
        sge_tpa_params->tpa_gro_consistent_flg = 0;
@@ -531,7 +533,8 @@ qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
        sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
 }
 
-static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
+static int qede_start_queues(struct rte_eth_dev *eth_dev,
+                            __rte_unused bool clear_stats)
 {
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
@@ -544,7 +547,6 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
        dma_addr_t p_phys_table;
        int txq_index;
        uint16_t page_cnt;
-       int vlan_removal_en = 1;
        int rc, tc, i;
 
        for_each_queue(i) {
@@ -697,6 +699,33 @@ static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
        return 0;
 }
 
+static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
+{
+       uint16_t val;
+
+       /* Lookup table */
+       static const uint32_t
+       ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
+               [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
+               [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
+               [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+               [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+               [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+               [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+       };
+
+       /* Bits (0..3) provide the L3/L4 protocol type */
+       val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+              PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
+              (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
+               PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
+
+       if (val < QEDE_PKT_TYPE_MAX)
+               return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
+       else
+               return RTE_PTYPE_UNKNOWN;
+}
+
 static inline uint8_t
 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
 {
@@ -732,7 +761,7 @@ static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
 }
 
 static inline void
-qede_reuse_page(struct qede_dev *qdev,
+qede_reuse_page(__rte_unused struct qede_dev *qdev,
                struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
 {
        struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
@@ -765,119 +794,69 @@ qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
        }
 }
 
-static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
-{
-       uint16_t val;
-
-       /* Lookup table */
-       static const uint32_t
-       ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
-               [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
-               [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
-               [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
-               [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
-               [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
-               [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
-       };
-
-       /* Bits (0..3) provides L3/L4 protocol type */
-       val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
-              PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
-              (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
-               PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
-
-       if (val < QEDE_PKT_TYPE_MAX)
-               return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
-       else
-               return RTE_PTYPE_UNKNOWN;
-}
-
 static inline void
-qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
-                            struct qede_rx_queue *rxq,
-                            struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
+                                    struct qede_rx_queue *rxq,
+                                    uint8_t agg_index, uint16_t len)
 {
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_agg_info *tpa_info;
-       struct rte_mbuf *temp_frag; /* Pointer to mbuf chain head */
-       struct rte_mbuf *curr_frag;
-       uint8_t list_count = 0;
+       struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
        uint16_t cons_idx;
-       uint8_t i;
-
-       PMD_RX_LOG(INFO, rxq, "TPA cont[%02x] - len_list [%04x %04x]\n",
-                  cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]),
-                  rte_le_to_cpu_16(cqe->len_list[1]));
-
-       tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
-       temp_frag = tpa_info->mbuf;
-       assert(temp_frag);
 
-       for (i = 0; cqe->len_list[i]; i++) {
+       /* Under certain conditions it is possible that the FW may not
+        * consume an additional or new BD, so the decision to consume the
+        * BD must be made based on len_list[0].
+        */
+       if (rte_le_to_cpu_16(len)) {
+               tpa_info = &rxq->tpa_info[agg_index];
                cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
                curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+               assert(curr_frag);
+               curr_frag->nb_segs = 1;
+               curr_frag->pkt_len = rte_le_to_cpu_16(len);
+               curr_frag->data_len = curr_frag->pkt_len;
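+               /* Chain the new segment at the tail of the aggregation */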
+               tpa_info->tpa_tail->next = curr_frag;
+               tpa_info->tpa_tail = curr_frag;
                qede_rx_bd_ring_consume(rxq);
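+               /* Replenish the RX BD consumed above */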
-               curr_frag->data_len = rte_le_to_cpu_16(cqe->len_list[i]);
-               temp_frag->next = curr_frag;
-               temp_frag = curr_frag;
-               list_count++;
-       }
-
-       /* Allocate RX mbuf on the RX BD ring for those many consumed  */
-       for (i = 0 ; i < list_count ; i++) {
                if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
-                       DP_ERR(edev, "Failed to allocate mbuf for LRO cont\n");
-                       tpa_info->state = QEDE_AGG_STATE_ERROR;
+                       PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
+                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       rxq->rx_alloc_errors++;
                }
        }
 }
 
+static inline void
+qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
+                            struct qede_rx_queue *rxq,
+                            struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+       PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
+                  cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
+       /* Only len_list[0] will have a value */
+       qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+                                            cqe->len_list[0]);
+}
+
 static inline void
 qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
                            struct qede_rx_queue *rxq,
                            struct eth_fast_path_rx_tpa_end_cqe *cqe)
 {
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct qede_agg_info *tpa_info;
-       struct rte_mbuf *temp_frag; /* Pointer to mbuf chain head */
-       struct rte_mbuf *curr_frag;
-       struct rte_mbuf *rx_mb;
-       uint8_t list_count = 0;
-       uint16_t cons_idx;
-       uint8_t i;
-
-       PMD_RX_LOG(INFO, rxq, "TPA End[%02x] - len_list [%04x %04x]\n",
-                  cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]),
-                  rte_le_to_cpu_16(cqe->len_list[1]));
-
-       tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
-       temp_frag = tpa_info->mbuf;
-       assert(temp_frag);
-
-       for (i = 0; cqe->len_list[i]; i++) {
-               cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
-               curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
-               qede_rx_bd_ring_consume(rxq);
-               curr_frag->data_len = rte_le_to_cpu_16(cqe->len_list[i]);
-               temp_frag->next = curr_frag;
-               temp_frag = curr_frag;
-               list_count++;
-       }
-
-       /* Allocate RX mbuf on the RX BD ring for those many consumed */
-       for (i = 0 ; i < list_count ; i++) {
-               if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
-                       DP_ERR(edev, "Failed to allocate mbuf for lro end\n");
-                       tpa_info->state = QEDE_AGG_STATE_ERROR;
-               }
-       }
+       struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
 
+       qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+                                            cqe->len_list[0]);
        /* Update total length and frags based on end TPA */
-       rx_mb = rxq->tpa_info[cqe->tpa_agg_index].mbuf;
-       /* TBD: Add sanity checks here */
+       rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
+       /* TODO: Add sanity checks */
        rx_mb->nb_segs = cqe->num_of_bds;
        rx_mb->pkt_len = cqe->total_packet_len;
-       tpa_info->state = QEDE_AGG_STATE_NONE;
+
+       PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
+                  " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
+                  rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
+                  rx_mb->pkt_len);
 }
 
 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
@@ -947,7 +926,6 @@ qede_process_sg_pkts(void *p_rxq,  struct rte_mbuf *rx_mb,
 {
        struct qede_rx_queue *rxq = p_rxq;
        struct qede_dev *qdev = rxq->qdev;
-       struct ecore_dev *edev = &qdev->edev;
        register struct rte_mbuf *seg1 = NULL;
        register struct rte_mbuf *seg2 = NULL;
        uint16_t sw_rx_index;
@@ -987,17 +965,19 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
        uint16_t rx_pkt = 0;
        union eth_rx_cqe *cqe;
-       struct eth_fast_path_rx_reg_cqe *fp_cqe;
+       struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
        register struct rte_mbuf *rx_mb = NULL;
        register struct rte_mbuf *seg1 = NULL;
        enum eth_rx_cqe_type cqe_type;
-       uint16_t pkt_len; /* Sum of all BD segments */
+       uint16_t pkt_len = 0; /* Sum of all BD segments */
        uint16_t len; /* Length of first BD */
        uint8_t num_segs = 1;
        uint16_t preload_idx;
-       uint8_t csum_flag;
        uint16_t parse_flag;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+       uint8_t bitfield_val;
        enum rss_hash_type htype;
+#endif
        uint8_t tunn_parse_flag;
        uint8_t j;
        struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
@@ -1005,9 +985,9 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        uint32_t packet_type;
        uint16_t vlan_tci;
        bool tpa_start_flg;
-       uint8_t bitfield_val;
        uint8_t offset, tpa_agg_idx, flags;
-       struct qede_agg_info *tpa_info;
+       struct qede_agg_info *tpa_info = NULL;
+       uint32_t rss_hash;
 
        hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
        sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -1022,6 +1002,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                packet_type = RTE_PTYPE_UNKNOWN;
                vlan_tci = 0;
                tpa_start_flg = false;
+               rss_hash = 0;
 
                /* Get the CQE from the completion ring */
                cqe =
@@ -1037,9 +1018,15 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        cqe_start_tpa = &cqe->fast_path_tpa_start;
                        tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
                        tpa_start_flg = true;
+                       /* Mark it as LRO packet */
+                       ol_flags |= PKT_RX_LRO;
+                       /* In split mode, seg_len is the same as
+                        * len_on_first_bd and ext_bd_len_list will be empty
+                        * since there are no additional buffers.
+                        */
                        PMD_RX_LOG(INFO, rxq,
-                           "TPA start[%u] - len %04x [header %02x]"
-                           " [bd_list[0] %04x], [seg_len %04x]\n",
+                           "TPA start[%d] - len_on_first_bd %d header %d"
+                           " [bd_list[0] %d], [seg_len %d]\n",
                            cqe_start_tpa->tpa_agg_index,
                            rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
                            cqe_start_tpa->header_len,
@@ -1050,14 +1037,13 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                case ETH_RX_CQE_TYPE_TPA_CONT:
                        qede_rx_process_tpa_cont_cqe(qdev, rxq,
                                                     &cqe->fast_path_tpa_cont);
-                       continue;
+                       goto next_cqe;
                case ETH_RX_CQE_TYPE_TPA_END:
                        qede_rx_process_tpa_end_cqe(qdev, rxq,
                                                    &cqe->fast_path_tpa_end);
                        tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
-                       rx_mb = rxq->tpa_info[tpa_agg_idx].mbuf;
-                       PMD_RX_LOG(INFO, rxq, "TPA end reason %d\n",
-                                  cqe->fast_path_tpa_end.end_reason);
+                       tpa_info = &rxq->tpa_info[tpa_agg_idx];
+                       rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
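+                       /* rx_mb points at the head of the completed aggregation */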
                        goto tpa_end;
                case ETH_RX_CQE_TYPE_SLOW_PATH:
                        PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
@@ -1076,20 +1062,29 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                /* Handle regular CQE or TPA start CQE */
                if (!tpa_start_flg) {
                        parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
-                       bitfield_val = fp_cqe->bitfields;
                        offset = fp_cqe->placement_offset;
                        len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
                        pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+                       vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+                       rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+                       bitfield_val = fp_cqe->bitfields;
+                       htype = (uint8_t)GET_FIELD(bitfield_val,
+                                       ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+#endif
                } else {
                        parse_flag =
                            rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
-                       bitfield_val = cqe_start_tpa->bitfields;
                        offset = cqe_start_tpa->placement_offset;
                        /* seg_len = len_on_first_bd */
                        len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
-                       tpa_info->start_cqe_bd_len = len +
-                                               cqe_start_tpa->header_len;
-                       tpa_info->mbuf = rx_mb;
+                       vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+                       bitfield_val = cqe_start_tpa->bitfields;
+                       htype = (uint8_t)GET_FIELD(bitfield_val,
+                               ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE);
+#endif
+                       rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
                }
                if (qede_tunn_exist(parse_flag)) {
                        PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
@@ -1136,24 +1131,24 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                }
 
                if (CQE_HAS_VLAN(parse_flag)) {
-                       vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
                        ol_flags |= PKT_RX_VLAN_PKT;
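+                       /* Fill in vlan_tci only when HW stripping is enabled */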
+                       if (qdev->vlan_strip_flg) {
+                               ol_flags |= PKT_RX_VLAN_STRIPPED;
+                               rx_mb->vlan_tci = vlan_tci;
+                       }
                }
-
                if (CQE_HAS_OUTER_VLAN(parse_flag)) {
-                       vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
                        ol_flags |= PKT_RX_QINQ_PKT;
+                       if (qdev->vlan_strip_flg) {
+                               rx_mb->vlan_tci = vlan_tci;
+                               ol_flags |= PKT_RX_QINQ_STRIPPED;
+                       }
                        rx_mb->vlan_tci_outer = 0;
                }
-
                /* RSS Hash */
-               htype = (uint8_t)GET_FIELD(bitfield_val,
-                                       ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
-               if (qdev->rss_enable && htype) {
+               if (qdev->rss_enable) {
                        ol_flags |= PKT_RX_RSS_HASH;
-                       rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
-                       PMD_RX_LOG(INFO, rxq, "Hash result 0x%x\n",
-                                  rx_mb->hash.rss);
+                       rx_mb->hash.rss = rss_hash;
                }
 
                if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
@@ -1200,13 +1195,19 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                rx_mb->port = rxq->port_id;
                rx_mb->ol_flags = ol_flags;
                rx_mb->data_len = len;
-               rx_mb->vlan_tci = vlan_tci;
                rx_mb->packet_type = packet_type;
-               PMD_RX_LOG(INFO, rxq, "pkt_type %04x len %04x flags %04lx\n",
-                          packet_type, len, (unsigned long)ol_flags);
+               PMD_RX_LOG(INFO, rxq,
+                          "pkt_type 0x%04x len %u hash_type %d hash_val 0x%x"
+                          " ol_flags 0x%04lx\n",
+                          packet_type, len, htype, rx_mb->hash.rss,
+                          (unsigned long)ol_flags);
                if (!tpa_start_flg) {
                        rx_mb->nb_segs = fp_cqe->bd_num;
                        rx_mb->pkt_len = pkt_len;
+               } else {
+                       /* store ref to the updated mbuf */
+                       tpa_info->tpa_head = rx_mb;
+                       tpa_info->tpa_tail = tpa_info->tpa_head;
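+                       /* tpa_tail will advance as TPA_CONT/TPA_END CQEs arrive */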
                }
                rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
 tpa_end:
@@ -1240,7 +1241,6 @@ qede_free_tx_pkt(struct qede_tx_queue *txq)
        struct rte_mbuf *mbuf;
        uint16_t nb_segs;
        uint16_t idx;
-       uint8_t nbds;
 
        idx = TX_CONS(txq);
        mbuf = txq->sw_tx_ring[idx].mbuf;
@@ -1264,16 +1264,21 @@ qede_free_tx_pkt(struct qede_tx_queue *txq)
 }
 
 static inline void
-qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
+qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
+                     struct qede_tx_queue *txq)
 {
        uint16_t hw_bd_cons;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
        uint16_t sw_tx_cons;
+#endif
 
        rte_compiler_barrier();
        hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
        sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
        PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
                   abs(hw_bd_cons - sw_tx_cons));
+#endif
        while (hw_bd_cons !=  ecore_chain_get_cons_idx(&txq->tx_pbl))
                qede_free_tx_pkt(txq);
 }
@@ -1360,10 +1365,16 @@ print_tx_bd_info(struct qede_tx_queue *txq,
 
 /* TX prepare to check packets meets TX conditions */
 uint16_t
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
 qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
                    uint16_t nb_pkts)
 {
        struct qede_tx_queue *txq = p_txq;
+#else
+qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
+                   uint16_t nb_pkts)
+{
+#endif
        uint64_t ol_flags;
        struct rte_mbuf *m;
        uint16_t i;
@@ -1410,9 +1421,11 @@ qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
                }
        }
 
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
        if (unlikely(i != nb_pkts))
                PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
                           nb_pkts - i);
+#endif
        return i;
 }
 
@@ -1458,7 +1471,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                bd3 = NULL;
                hdr_size = 0;
 
-               mbuf = *tx_pkts;
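+               /* Fetch the mbuf and advance to the next Tx packet */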
+               mbuf = *tx_pkts++;
                assert(mbuf);
 
                /* Check minimum TX BDS availability against available BDs */
@@ -1500,7 +1513,6 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                /* Fill the entry in the SW ring and the BDs in the FW ring */
                idx = TX_PROD(txq);
-               *tx_pkts++;
                txq->sw_tx_ring[idx].mbuf = mbuf;
 
                /* BD1 */
@@ -1651,7 +1663,7 @@ static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct qede_fastpath *fp;
-       uint8_t i, rss_id, txq_index, tc;
+       uint8_t i, txq_index, tc;
        int rxq = 0, txq = 0;
 
        for_each_queue(i) {
@@ -1679,8 +1691,6 @@ int qede_dev_start(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
-       struct qed_link_output link_output;
-       struct qede_fastpath *fp;
        int rc;
 
        DP_INFO(edev, "Device state is %d\n", qdev->state);
@@ -1712,10 +1722,6 @@ int qede_dev_start(struct rte_eth_dev *eth_dev)
        /* Bring-up the link */
        qede_dev_set_link_state(eth_dev, true);
 
-       /* Reset ring */
-       if (qede_reset_fp_rings(qdev))
-               return -ENOMEM;
-
        /* Start/resume traffic */
        qdev->ops->fastpath_start(edev);
 
@@ -1835,6 +1841,7 @@ static int qede_stop_queues(struct qede_dev *qdev)
                        }
                }
        }
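+       /* Reset fastpath rings now that all queues are stopped */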
+       qede_reset_fp_rings(qdev);
 
        return 0;
 }