net/bnxt: fix reusing L2 filter
drivers/net/bnxt/bnxt_hwrm.c
index f476b10..8178213 100644
@@ -11,9 +11,9 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_version.h>
+#include <rte_io.h>
 
 #include "bnxt.h"
-#include "bnxt_cpr.h"
 #include "bnxt_filter.h"
 #include "bnxt_hwrm.h"
 #include "bnxt_rxq.h"
 #include "bnxt_vnic.h"
 #include "hsi_struct_def_dpdk.h"
 
-#include <rte_io.h>
-
-#define HWRM_CMD_TIMEOUT               6000000
-#define HWRM_SHORT_CMD_TIMEOUT         50000
 #define HWRM_SPEC_CODE_1_8_3           0x10803
 #define HWRM_VERSION_1_9_1             0x10901
 #define HWRM_VERSION_1_9_2             0x10903
@@ -78,9 +74,9 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
 
 /*
  * HWRM Functions (sent to HWRM)
- * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
- * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
- * command was failed by the ChiMP.
+ * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT (-110)
+ * if the HWRM command times out, or a negative errno value if the command
+ * is failed by the FW.
  */
 
 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
@@ -106,9 +102,9 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 
-       /* For VER_GET command, set timeout as 50ms */
+       /* VER_GET runs before the FW-provided timeout is known; use default */
        if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
-               timeout = HWRM_SHORT_CMD_TIMEOUT;
-       else
                timeout = HWRM_CMD_TIMEOUT;
+       else
+               timeout = bp->hwrm_cmd_timeout;
 
        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
@@ -180,11 +176,11 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 }
 
 /*
- * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
+ * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
  * spinlock, and does initial processing.
  *
  * HWRM_CHECK_RESULT() returns errors on failure and may not be used.  It
- * releases the spinlock only if it returns.  If the regular int return codes
+ * releases the spinlock only if it returns. If the regular int return codes
  * are not used by the function, HWRM_CHECK_RESULT() should not be used
  * directly, rather it should be copied and modified to suit the function.
  *
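
As a sketch of the calling convention described above (FUNC_QCFG is chosen
arbitrarily for illustration; the request/response types come from
hsi_struct_def_dpdk.h):

    static int bnxt_hwrm_example(struct bnxt *bp)
    {
            struct hwrm_func_qcfg_input req = {.req_type = 0 };
            struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
            int rc;

            HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB); /* takes hwrm_lock */
            rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
                                        BNXT_USE_CHIMP_MB);
            HWRM_CHECK_RESULT(); /* on error: drops the lock and returns */

            /* Read resp fields only while the lock is still held. */

            HWRM_UNLOCK(); /* releases hwrm_lock */
            return rc;
    }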
@@ -219,8 +215,14 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
-               else if (rc > 0) \
+               else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+                       rc = -ENOSPC; \
+               else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
+               else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+                       rc = -ENOTSUP; \
+               else if (rc > 0) \
+                       rc = -EIO; \
                return rc; \
        } \
        if (resp->error_code) { \
@@ -241,8 +243,14 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
-               else if (rc > 0) \
+               else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+                       rc = -ENOSPC; \
+               else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
+               else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+                       rc = -ENOTSUP; \
+               else if (rc > 0) \
+                       rc = -EIO; \
                return rc; \
        } \
 } while (0)
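
For reference, the firmware-status-to-errno mapping both macros now apply,
written out as a plain helper (a sketch only; the macros above are the
authoritative form):

    static int bnxt_map_hwrm_status(int rc)
    {
            switch (rc) {
            case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
                    return -EACCES;
            case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
                    return -ENOSPC;
            case HWRM_ERR_CODE_INVALID_PARAMS:
                    return -EINVAL;
            case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
                    return -ENOTSUP;
            default:
                    /* Any other positive FW status becomes -EIO. */
                    return rc > 0 ? -EIO : rc;
            }
    }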
@@ -283,20 +291,17 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
-       /* FIXME add multicast flag, when multicast adding options is supported
-        * by ethtool.
-        */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
+
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
-       if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
+
+       if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
-       if (vnic->flags & BNXT_VNIC_INFO_MCAST)
-               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
-       if (vnic->mc_addr_cnt) {
+       } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
@@ -358,15 +363,32 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 }
 
 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
-                          struct bnxt_filter_info *filter)
+                            struct bnxt_filter_info *filter)
 {
        int rc = 0;
+       struct bnxt_filter_info *l2_filter = filter;
+       struct bnxt_vnic_info *vnic = NULL;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
 
        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;
 
+       if (filter->matching_l2_fltr_ptr)
+               l2_filter = filter->matching_l2_fltr_ptr;
+
+       PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
+                   filter, l2_filter, l2_filter->l2_ref_cnt);
+
+       if (l2_filter->l2_ref_cnt == 0)
+               return 0;
+
+       if (l2_filter->l2_ref_cnt > 0)
+               l2_filter->l2_ref_cnt--;
+
+       if (l2_filter->l2_ref_cnt > 0)
+               return 0;
+
        HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
 
        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
@@ -377,6 +399,14 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
        HWRM_UNLOCK();
 
        filter->fw_l2_filter_id = UINT64_MAX;
+       if (l2_filter->l2_ref_cnt == 0) {
+               vnic = l2_filter->vnic;
+               if (vnic) {
+                       STAILQ_REMOVE(&vnic->filter, l2_filter,
+                                     bnxt_filter_info, next);
+                       bnxt_free_filter(bp, l2_filter);
+               }
+       }
 
        return 0;
 }
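
The decrement above pairs with a reference bump taken when a new filter
reuses an existing L2 filter. A minimal sketch of that counterpart, assuming
the caller has already looked up a matching filter (the helper name is
hypothetical):

    static void bnxt_reuse_l2_filter(struct bnxt_filter_info *filter,
                                     struct bnxt_filter_info *l2_filter)
    {
            /* Share the existing HW filter instead of allocating a
             * duplicate; the HW filter is freed only when the last
             * reference is cleared.
             */
            filter->matching_l2_fltr_ptr = l2_filter;
            filter->fw_l2_filter_id = l2_filter->fw_l2_filter_id;
            l2_filter->l2_ref_cnt++;
    }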
@@ -413,8 +443,6 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
 
        req.flags = rte_cpu_to_le_32(filter->flags);
-       req.flags |=
-       rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
 
        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
@@ -444,6 +472,11 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;
+       if (filter->pri_hint) {
+               req.pri_hint = filter->pri_hint;
+               req.l2_filter_id_hint =
+                       rte_cpu_to_le_64(filter->l2_filter_id_hint);
+       }
 
        req.enables = rte_cpu_to_le_32(enables);
 
@@ -497,7 +530,6 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 
-/*     if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;
 
@@ -641,16 +673,15 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
 
        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
-               bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+               bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
-       } else {
-               bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
        }
 
        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
-               bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
-       else
-               bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+               bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
+
+       if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
+               bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
 
        HWRM_UNLOCK();
 
@@ -672,6 +703,39 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }
 
+       /* On older FW, bnxt_hwrm_func_resc_qcaps can fail and cause an
+        * init failure. The error can be ignored, so return success.
+        */
+
+       return 0;
+}
+
+/* VNIC_QCAPS reports capabilities common to all VNICs; no vnic_id needed. */
+int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
+{
+       int rc = 0;
+       struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
+       struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+       HWRM_PREP(req, VNIC_QCAPS, BNXT_USE_CHIMP_MB);
+
+       req.target_id = rte_cpu_to_le_16(0xffff);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+       HWRM_CHECK_RESULT();
+
+       if (rte_le_to_cpu_32(resp->flags) &
+           HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
+               bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
+               PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
+       }
+
+       bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
+
+       HWRM_UNLOCK();
+
        return rc;
 }
 
@@ -703,8 +767,9 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;
 
-       flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
-       if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+       if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+               flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
+       if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
 
        /* PFs and trusted VFs should indicate the support of the
@@ -744,7 +809,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
-       if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+       if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
@@ -757,7 +822,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 
        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
-               bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;
+               bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
 
        HWRM_UNLOCK();
 
@@ -798,9 +863,7 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
-       req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
-                                            bp->tx_nr_rings +
-                                            BNXT_NUM_ASYNC_CPR(bp));
+       req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
@@ -813,6 +876,10 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
+       } else if (bp->vf_resv_strategy ==
+                  HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
+               enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
+               req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
        }
 
        if (test)
@@ -851,7 +918,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
-       HWRM_CHECK_RESULT();
+       HWRM_CHECK_RESULT_SILENT();
 
        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
@@ -919,6 +986,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;
 
+       /* def_req_timeout is in milliseconds; convert to microseconds. */
+       bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
+       bp->hwrm_cmd_timeout *= 1000;
+       if (!bp->hwrm_cmd_timeout)
+               bp->hwrm_cmd_timeout = HWRM_CMD_TIMEOUT;
+
        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
@@ -1005,6 +1079,11 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
+       if (dev_caps_cfg &
+           HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
+               bp->flags |= BNXT_FLAG_ADV_FLOW_MGMT;
+               PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
+       }
 
 error:
        HWRM_UNLOCK();
@@ -1143,49 +1222,101 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
        return rc;
 }
 
+static bool bnxt_find_lossy_profile(struct bnxt *bp)
+{
+       int i = 0;
+
+       for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+               if (bp->tx_cos_queue[i].profile ==
+                   HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+                       bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
+                       return true;
+               }
+       }
+       return false;
+}
+
+static void bnxt_find_first_valid_profile(struct bnxt *bp)
+{
+       int i = 0;
+
+       for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+               if (bp->tx_cos_queue[i].profile !=
+                   HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
+                   bp->tx_cos_queue[i].id !=
+                   HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
+                       bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
+                       break;
+               }
+       }
+}
+
 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 {
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        int i;
 
+get_rx_info:
        HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
 
-       req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
-       /* HWRM Version >= 1.9.1 */
-       if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
+       req.flags = rte_cpu_to_le_32(dir);
+       /* HWRM Version >= 1.9.1 only if COS Classification is not required. */
+       if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
+           !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
        HWRM_CHECK_RESULT();
 
-#define GET_QUEUE_INFO(x) \
-       bp->cos_queue[x].id = resp->queue_id##x; \
-       bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
-
-       GET_QUEUE_INFO(0);
-       GET_QUEUE_INFO(1);
-       GET_QUEUE_INFO(2);
-       GET_QUEUE_INFO(3);
-       GET_QUEUE_INFO(4);
-       GET_QUEUE_INFO(5);
-       GET_QUEUE_INFO(6);
-       GET_QUEUE_INFO(7);
+       if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
+               GET_TX_QUEUE_INFO(0);
+               GET_TX_QUEUE_INFO(1);
+               GET_TX_QUEUE_INFO(2);
+               GET_TX_QUEUE_INFO(3);
+               GET_TX_QUEUE_INFO(4);
+               GET_TX_QUEUE_INFO(5);
+               GET_TX_QUEUE_INFO(6);
+               GET_TX_QUEUE_INFO(7);
+       } else {
+               GET_RX_QUEUE_INFO(0);
+               GET_RX_QUEUE_INFO(1);
+               GET_RX_QUEUE_INFO(2);
+               GET_RX_QUEUE_INFO(3);
+               GET_RX_QUEUE_INFO(4);
+               GET_RX_QUEUE_INFO(5);
+               GET_RX_QUEUE_INFO(6);
+               GET_RX_QUEUE_INFO(7);
+       }
 
        HWRM_UNLOCK();
 
+       if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
+               goto done;
+
        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
-               bp->tx_cosq_id = bp->cos_queue[0].id;
+               bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
        } else {
+               int j;
+
                /* iterate and find the COSq profile to use for Tx */
-               for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
-                       if (bp->cos_queue[i].profile ==
-                               HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
-                               bp->tx_cosq_id = bp->cos_queue[i].id;
-                               break;
+               if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
+                       for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
+                               if (bp->tx_cos_queue[i].id != 0xff)
+                                       bp->tx_cosq_id[j++] =
+                                               bp->tx_cos_queue[i].id;
                        }
+               } else {
+                       /* When CoS classification is disabled, normal NIC
+                        * operation should ideally use a LOSSY profile.
+                        * If none is found, fall back to the first valid
+                        * profile.
+                        */
+                       if (!bnxt_find_lossy_profile(bp))
+                               bnxt_find_first_valid_profile(bp);
                }
        }
 
@@ -1195,15 +1326,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
                bp->max_tc = BNXT_MAX_QUEUE;
        bp->max_q = bp->max_tc;
 
-       PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
+       if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
+               dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
+               goto get_rx_info;
+       }
 
+done:
        return rc;
 }
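
The goto above makes two passes over the same query, one per direction; an
equivalent loop formulation, shown only to make the control flow explicit
(illustrative, not the driver's code):

    uint32_t dirs[] = {
            HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX,
            HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX,
    };
    unsigned int d;

    for (d = 0; d < RTE_DIM(dirs); d++) {
            /* Issue HWRM_QUEUE_QPORTCFG with req.flags = dirs[d] and
             * record the eight id/profile pairs into tx_cos_queue[] or
             * rx_cos_queue[]; the Tx CoS queue selection runs on the
             * TX pass only.
             */
    }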
 
 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
-                        uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
+                        uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
+                        uint16_t tx_cosq_id)
 {
        int rc = 0;
        uint32_t enables = 0;
@@ -1225,7 +1361,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
-               req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
+               req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
@@ -1395,8 +1531,7 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
 
        HWRM_CHECK_RESULT();
 
-       bp->grp_info[idx].fw_grp_id =
-           rte_le_to_cpu_16(resp->ring_group_id);
+       bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
 
        HWRM_UNLOCK();
 
@@ -1454,8 +1589,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
        req.update_period_ms = rte_cpu_to_le_32(0);
 
-       req.stats_dma_addr =
-           rte_cpu_to_le_64(cpr->hw_stats_map);
+       req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -1508,8 +1642,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
 
 skip_ring_grps:
-       vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
-                               RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
+       vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
        HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
 
        if (vnic->func_default)
@@ -1608,9 +1741,29 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
 
        if (BNXT_CHIP_THOR(bp)) {
-               struct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];
-               struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
-               struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+               int dflt_rxq = vnic->start_grp_id;
+               struct bnxt_rx_ring_info *rxr;
+               struct bnxt_cp_ring_info *cpr;
+               struct bnxt_rx_queue *rxq;
+               int i;
+
+               /*
+                * The first active receive ring is used as the VNIC
+                * default receive ring. If there are no active receive
+                * rings (all corresponding receive queues are stopped),
+                * the first receive ring is used.
+                */
+               for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
+                       rxq = bp->eth_dev->data->rx_queues[i];
+                       if (rxq->rx_started) {
+                               dflt_rxq = i;
+                               break;
+                       }
+               }
+
+               rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
+               rxr = rxq->rx_ring;
+               cpr = rxq->cp_ring;
 
                req.default_rx_ring_id =
                        rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
@@ -1631,6 +1784,11 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        }
+       if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
+               ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
+               req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
+       }
+
        enables |= ctx_enable_flag;
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
@@ -1743,8 +1901,9 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
        return rc;
 }
 
-int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
-                           struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
+static
+int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
+                            struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
 {
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
@@ -1767,6 +1926,28 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
        return rc;
 }
 
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+       int rc = 0;
+
+       if (BNXT_CHIP_THOR(bp)) {
+               int j;
+
+               for (j = 0; j < vnic->num_lb_ctxts; j++) {
+                       rc = _bnxt_hwrm_vnic_ctx_free(bp,
+                                                     vnic,
+                                                     vnic->fw_grp_ids[j]);
+                       vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+               }
+               vnic->num_lb_ctxts = 0;
+       } else {
+               rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
+               vnic->rss_rule = INVALID_HW_RING_ID;
+       }
+
+       return rc;
+}
+
 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
        int rc = 0;
@@ -1906,8 +2087,16 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
        struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-       if (BNXT_CHIP_THOR(bp))
+       if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
+               if (enable)
+                       PMD_DRV_LOG(ERR, "No HW support for LRO\n");
+               return -ENOTSUP;
+       }
+
+       if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+               PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
                return 0;
+       }
 
        HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
 
@@ -1923,9 +2112,8 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
                        HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
-               req.max_agg_segs = rte_cpu_to_le_16(5);
-               req.max_aggs =
-                       rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
+               req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
+               req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
                req.min_agg_len = rte_cpu_to_le_32(512);
        }
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
@@ -2040,10 +2228,6 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
        return rc;
 }
 
-/*
- * HWRM utility functions
- */
-
 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
 {
        unsigned int i;
@@ -2204,11 +2388,8 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
                        bp->grp_info[queue_index].ag_fw_ring_id =
                                                        INVALID_HW_RING_ID;
        }
-       if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
+       if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                bnxt_free_cp_ring(bp, cpr);
-               if (rxq->nq_ring)
-                       bnxt_free_nq_ring(bp, rxq->nq_ring);
-       }
 
        if (BNXT_HAS_RING_GRPS(bp))
                bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
@@ -2240,8 +2421,6 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr);
                        cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
-                       if (txq->nq_ring)
-                               bnxt_free_nq_ring(bp, txq->nq_ring);
                }
        }
 
@@ -2267,6 +2446,10 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
        return rc;
 }
 
+/*
+ * HWRM utility functions
+ */
+
 void bnxt_free_hwrm_resources(struct bnxt *bp)
 {
        /* Release memzone */
@@ -2312,11 +2495,9 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                        rc = bnxt_hwrm_clear_em_filter(bp, filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-               else
-                       rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+               rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
-               //if (rc)
-                       //break;
+               bnxt_free_filter(bp, filter);
        }
        return rc;
 }
@@ -2328,20 +2509,18 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        struct rte_flow *flow;
        int rc = 0;
 
-       STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+       while (!STAILQ_EMPTY(&vnic->flow_list)) {
+               flow = STAILQ_FIRST(&vnic->flow_list);
                filter = flow->filter;
                PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        rc = bnxt_hwrm_clear_em_filter(bp, filter);
                else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                        rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-               else
-                       rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+               rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
-               //if (rc)
-                       //break;
        }
        return rc;
 }
@@ -2381,7 +2560,7 @@ void bnxt_free_tunnel_ports(struct bnxt *bp)
 
 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 {
-       int i, j;
+       int i;
 
        if (bp->vnic_info == NULL)
                return;
@@ -2390,29 +2569,17 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
         * Cleanup VNICs in reverse order, to make sure the L2 filter
         * from vnic0 is last to be cleaned up.
         */
-       for (i = bp->nr_vnics - 1; i >= 0; i--) {
+       for (i = bp->max_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
 
-               if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
-                       PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
-                       return;
-               }
+               if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+                       continue;
 
                bnxt_clear_hwrm_vnic_flows(bp, vnic);
 
                bnxt_clear_hwrm_vnic_filters(bp, vnic);
 
-               if (BNXT_CHIP_THOR(bp)) {
-                       for (j = 0; j < vnic->num_lb_ctxts; j++) {
-                               bnxt_hwrm_vnic_ctx_free(bp, vnic,
-                                                       vnic->fw_grp_ids[j]);
-                               vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
-                       }
-                       vnic->num_lb_ctxts = 0;
-               } else {
-                       bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
-                       vnic->rss_rule = INVALID_HW_RING_ID;
-               }
+               bnxt_hwrm_vnic_ctx_free(bp, vnic);
 
                bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
 
@@ -2765,17 +2932,20 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
        if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
                bp->flags |= BNXT_FLAG_MULTI_HOST;
 
-       if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
+       if (BNXT_VF(bp) &&
+           !BNXT_VF_IS_TRUSTED(bp) &&
+           (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
                bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
                PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
        } else if (BNXT_VF(bp) &&
+                  BNXT_VF_IS_TRUSTED(bp) &&
                   !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
                bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
                PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
        }
 
        if (mtu)
-               *mtu = resp->mtu;
+               *mtu = rte_le_to_cpu_16(resp->mtu);
 
        switch (resp->port_partition_type) {
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
@@ -2847,9 +3017,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 
        req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
        req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
-       req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
-                                  RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
-                                  BNXT_NUM_VLANS);
+       req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
        req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
@@ -2888,9 +3056,7 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,
        req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
                                    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
                                    BNXT_NUM_VLANS);
-       req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
-                                   RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
-                                   BNXT_NUM_VLANS);
+       req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
        req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
                                                (num_vfs + 1));
        req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
@@ -3517,7 +3683,6 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
                stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
        }
 
-
        HWRM_UNLOCK();
 
        return rc;
@@ -4065,7 +4230,6 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
        if (filter->fw_em_filter_id == UINT64_MAX)
                return 0;
 
-       PMD_DRV_LOG(ERR, "Clear EM filter\n");
        HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
 
        req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
@@ -4102,7 +4266,6 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
              HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);
 
-
        if (filter->ip_addr_type) {
                req.ip_addr_type = filter->ip_addr_type;
                enables |=
@@ -4115,10 +4278,6 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
                memcpy(req.src_macaddr, filter->src_macaddr,
                       RTE_ETHER_ADDR_LEN);
-       //if (enables &
-           //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
-               //memcpy(req.dst_macaddr, filter->dst_macaddr,
-                      //RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
                req.ethertype = rte_cpu_to_be_16(filter->ethertype);
@@ -4238,8 +4397,10 @@ bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                        }
 
                        /* Return if no rings are active. */
-                       if (cnt == max_rings)
+                       if (cnt == max_rings) {
+                               HWRM_UNLOCK();
                                return 0;
+                       }
 
                        /* Add rx/cp ring pair to RSS table. */
                        rxr = rxqs[k]->rx_ring;
@@ -4273,23 +4434,31 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        if (BNXT_CHIP_THOR(bp))
                return bnxt_vnic_rss_configure_thor(bp, vnic);
 
-       /*
-        * Fill the RSS hash & redirection table with
-        * ring group ids for all VNICs
-        */
-       for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
-               rss_idx++, fw_idx++) {
-               for (i = 0; i < bp->rx_cp_nr_rings; i++) {
-                       fw_idx %= bp->rx_cp_nr_rings;
-                       if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
-                               break;
-                       fw_idx++;
+       if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+               return 0;
+
+       if (vnic->rss_table && vnic->hash_type) {
+               /*
+                * Fill the RSS hash & redirection table with
+                * ring group ids for all VNICs
+                */
+               for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+                       rss_idx++, fw_idx++) {
+                       for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+                               fw_idx %= bp->rx_cp_nr_rings;
+                               if (vnic->fw_grp_ids[fw_idx] !=
+                                   INVALID_HW_RING_ID)
+                                       break;
+                               fw_idx++;
+                       }
+                       if (i == bp->rx_cp_nr_rings)
+                               return 0;
+                       vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
                }
-               if (i == bp->rx_cp_nr_rings)
-                       return 0;
-               vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
+               return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
        }
-       return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+
+       return 0;
 }
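
A worked example of the fill above, as a comment (assuming four RX rings):

    /*
     * With rx_cp_nr_rings == 4 and all ring groups valid, the loop
     * produces a round-robin redirection table:
     *
     *     rss_table[i] = fw_grp_ids[i % 4],  0 <= i < HW_HASH_INDEX_SIZE
     *
     * Ring groups of stopped queues hold INVALID_HW_RING_ID and are
     * skipped by the inner loop; if no group is valid, the function
     * returns without touching the table.
     */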
 
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
@@ -4712,7 +4881,7 @@ int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
        uint32_t flags;
        int rc;
 
-       if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
+       if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
                return 0;
 
        /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
@@ -4734,6 +4903,9 @@ int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
        flags = rte_le_to_cpu_32(resp->flags);
        HWRM_UNLOCK();
 
+       if (!up)
+               return 0;
+
        if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
                PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
                bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
@@ -4752,7 +4924,7 @@ int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
        int rc;
 
        /* Older FW does not have error recovery support */
-       if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
+       if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
                return 0;
 
        if (!info) {
@@ -4899,3 +5071,35 @@ int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
 
        return rc;
 }
+
+int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
+{
+       struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
+                                       bp->hwrm_cmd_resp_addr;
+       struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
+       uint32_t flags = 0;
+       int rc = 0;
+
+       if (!(bp->flags & BNXT_FLAG_ADV_FLOW_MGMT))
+               return rc;
+
+       if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+               PMD_DRV_LOG(DEBUG,
+                           "Not a PF or trusted VF. Command not supported\n");
+               return 0;
+       }
+
+       HWRM_PREP(req, CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp));
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+       HWRM_CHECK_RESULT();
+       flags = rte_le_to_cpu_32(resp->flags);
+       HWRM_UNLOCK();
+
+       if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN) {
+               bp->flow_flags |= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN;
+               PMD_DRV_LOG(INFO, "Source L2 header filtering enabled\n");
+       }
+
+       return rc;
+}