net/bnxt: refactor the query stats

Rework the statistics query path and the related HWRM plumbing: log the
cmd_err/opaque details on HWRM command failure, program the multicast
table through the RX mask, pass dst_id and the completion ring id to
HWRM calls explicitly, add VNIC placement-mode (jumbo threshold) and
TPA configuration, add function, context and port statistics commands,
and add tunnel destination port alloc/free and default VLAN config.
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d2a8d8f..a1aa80e 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -139,7 +139,7 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
        }
 
        if (i >= HWRM_CMD_TIMEOUT) {
-               RTE_LOG(ERR, PMD, "Error sending msg %x\n",
+               RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
@@ -176,7 +176,22 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
-                       RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
+                       if (rte_le_to_cpu_16(resp->resp_len) >= 16) { \
+                               struct hwrm_err_output *tmp_hwrm_err_op = \
+                                                       (void *)resp; \
+                               RTE_LOG(ERR, PMD, \
+                                       "%s error %d:%d:%08x:%04x\n", \
+                                       __func__, \
+                                       rc, tmp_hwrm_err_op->cmd_err, \
+                                       rte_le_to_cpu_32(\
+                                               tmp_hwrm_err_op->opaque_0), \
+                                       rte_le_to_cpu_16(\
+                                               tmp_hwrm_err_op->opaque_1)); \
+                       } else { \
+                               RTE_LOG(ERR, PMD, \
+                                       "%s error %d\n", \
+                                       __func__, rc); \
+                       } \
                        return rc; \
                } \
        }
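
For reference, the 16-byte guard above matches the generic error layout
shared by all HWRM responses. A sketch of that structure, with field
names as defined in the bnxt HSI header (hsi_struct_def_dpdk.h):

    /* Generic HWRM error response (reference sketch). The cmd_err and
     * opaque_* fields logged above start at byte offset 8, so they are
     * only present when resp_len >= 16.
     */
    struct hwrm_err_output {
            uint16_t error_code;    /* command specific error code */
            uint16_t req_type;      /* echoed request type */
            uint16_t seq_id;        /* echoed sequence id */
            uint16_t resp_len;      /* total response length, bytes */
            uint32_t opaque_0;      /* debug data */
            uint16_t opaque_1;      /* debug data */
            uint8_t  cmd_err;       /* command specific error */
            uint8_t  valid;         /* set when the response is done */
    };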
@@ -214,7 +229,12 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
-               mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+       if (vnic->mc_addr_cnt) {
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
+               req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
+               req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
+       }
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);
 
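
A hypothetical call-site sketch for the new multicast path, assuming
vnic->mc_list is the host buffer behind vnic->mc_list_dma_addr (the
buffer and variable names here are illustrative, not from this patch):

    /* Publish a multicast list, then reprogram the RX mask so the
     * MCAST bit and the multicast table address take effect.
     */
    memcpy(vnic->mc_list, mc_addr_set, nb_mc_addr * ETHER_ADDR_LEN);
    vnic->mc_addr_cnt = nb_mc_addr;
    rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);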
@@ -246,7 +266,7 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
 }
 
 int bnxt_hwrm_set_filter(struct bnxt *bp,
-                        struct bnxt_vnic_info *vnic,
+                        uint16_t dst_id,
                         struct bnxt_filter_info *filter)
 {
        int rc = 0;
@@ -260,7 +280,7 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
 
        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
-       req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+       req.dst_id = rte_cpu_to_le_16(dst_id);
 
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
@@ -645,20 +665,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
-                        uint32_t stats_ctx_id)
+                        uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
 {
        int rc = 0;
+       uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
        HWRM_PREP(req, RING_ALLOC, -1, resp);
 
-       req.enables = rte_cpu_to_le_32(0);
-
        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
+       req.length = rte_cpu_to_le_32(ring->ring_size);
 
        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
@@ -666,12 +686,11 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
-               req.cmpl_ring_id =
-                   rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
-               req.length = rte_cpu_to_le_32(ring->ring_size);
+               req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
-               req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
-                       HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
+               if (stats_ctx_id != INVALID_STATS_CTX_ID)
+                       enables |=
+                       HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
@@ -680,13 +699,13 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
-               req.length = rte_cpu_to_le_32(ring->ring_size);
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }
+       req.enables = rte_cpu_to_le_32(enables);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
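
With the reworked signature, callers pass the stats context and
completion ring id explicitly instead of the function reaching into
bp->grp_info. A hypothetical completion-ring allocation might look
like this (the argument values are illustrative):

    /* Completion rings carry no stats context of their own, so
     * INVALID_STATS_CTX_ID keeps STAT_CTX_ID_VALID clear; the
     * cmpl_ring_id argument is unused for this ring type.
     */
    rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
                              HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
                              idx, INVALID_STATS_CTX_ID,
                              INVALID_HW_RING_ID);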
@@ -802,13 +821,12 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
 
-       HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
-
        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;
 
+       HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
+
        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
-       req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -817,8 +835,8 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
        return rc;
 }
 
-int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
-                            struct bnxt_cp_ring_info *cpr, unsigned int idx)
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+                               unsigned int idx __rte_unused)
 {
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
@@ -826,9 +844,8 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
 
        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
 
-       req.update_period_ms = rte_cpu_to_le_32(1000);
+       req.update_period_ms = rte_cpu_to_le_32(0);
 
-       req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);
 
@@ -837,13 +854,12 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
        HWRM_CHECK_RESULT;
 
        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
-       bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
 
        return rc;
 }
 
-int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
-                           struct bnxt_cp_ring_info *cpr, unsigned int idx)
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+                               unsigned int idx __rte_unused)
 {
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
@@ -852,15 +868,11 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
 
        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
-       req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
        HWRM_CHECK_RESULT;
 
-       cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
-       bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
-
        return rc;
 }
 
@@ -871,15 +883,10 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
        /* map ring groups to this vnic */
-       for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
-               if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
-                       RTE_LOG(ERR, PMD,
-                               "Not enough ring groups avail:%x req:%x\n", j,
-                               (vnic->end_grp_id - vnic->start_grp_id) + 1);
-                       break;
-               }
+       RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
+               vnic->start_grp_id, vnic->end_grp_id);
+       for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
-       }
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
@@ -1131,6 +1138,70 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
        return rc;
 }
 
+int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
+                       struct bnxt_vnic_info *vnic)
+{
+       int rc = 0;
+       struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
+       struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       uint16_t size;
+
+       HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
+
+       req.flags = rte_cpu_to_le_32(
+                       HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
+
+       req.enables = rte_cpu_to_le_32(
+               HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
+
+       size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+       size -= RTE_PKTMBUF_HEADROOM;
+
+       req.jumbo_thresh = rte_cpu_to_le_16(size);
+       req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
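
A worked example of the jumbo threshold math, assuming default mbufs:

    /* data room    = RTE_MBUF_DEFAULT_BUF_SIZE = 2048 + 128 bytes
     * jumbo_thresh = (2048 + 128) - RTE_PKTMBUF_HEADROOM (128) = 2048
     * i.e. one full receive buffer before a packet spills over into
     * aggregation buffers.
     */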
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+                       struct bnxt_vnic_info *vnic, bool enable)
+{
+       int rc = 0;
+       struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
+       struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+       HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
+
+       if (enable) {
+               req.enables = rte_cpu_to_le_32(
+                               HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
+                               HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
+                               HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
+               req.flags = rte_cpu_to_le_32(
+                               HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
+                               HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
+                               HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
+                               HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
+                               HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
+                       HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
+               req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+               req.max_agg_segs = rte_cpu_to_le_16(5);
+               req.max_aggs =
+                       rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
+               req.min_agg_len = rte_cpu_to_le_32(512);
+       }
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
 {
        struct hwrm_func_cfg_input req = {0};
@@ -1153,6 +1224,43 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
        return rc;
 }
 
+int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
+                         struct rte_eth_stats *stats)
+{
+       int rc = 0;
+       struct hwrm_func_qstats_input req = {.req_type = 0};
+       struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+       HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+
+       req.fid = rte_cpu_to_le_16(fid);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT;
+
+       stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+       stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+       stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+       stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+       stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+       stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+
+       stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+       stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+       stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+       stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+       stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+       stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+
+       stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
+       stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
+
+       stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
+
+       return rc;
+}
+
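
A hypothetical sketch of the stats_get side of this call (the 0xffff
fid is assumed here to address the calling function itself, per HWRM
convention):

    /* Fold this function's hardware counters into the rte_eth_stats
     * the application asked for.
     */
    struct rte_eth_stats stats = { 0 };
    int rc = bnxt_hwrm_func_qstats(bp, 0xffff, &stats);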
 /*
  * HWRM utility functions
  */
@@ -1189,14 +1297,20 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
        struct bnxt_cp_ring_info *cpr;
 
        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
-               unsigned int idx = i + 1;
 
                if (i >= bp->rx_cp_nr_rings)
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                else
                        cpr = bp->rx_queues[i]->cp_ring;
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
-                       rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
+                       rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
+                       cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+                       /*
+                        * TODO. Need a better way to reset grp_info.stats_ctx
+                        * for Rx rings only. stats_ctx is not saved for Tx
+                        * in grp_info.
+                        */
+                       bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
                        if (rc)
                                return rc;
                }
@@ -1213,7 +1327,6 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;
-               unsigned int idx = i + 1;
 
                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
@@ -1223,7 +1336,7 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
                        cpr = rxq->cp_ring;
                }
 
-               rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
+               rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
 
                if (rc)
                        return rc;
@@ -1233,11 +1346,10 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
 
 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 {
-       uint16_t i;
+       uint16_t idx;
        uint32_t rc = 0;
 
-       for (i = 0; i < bp->rx_cp_nr_rings; i++) {
-               unsigned int idx = i + 1;
+       for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
 
                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
@@ -1254,8 +1366,8 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
        return rc;
 }
 
-static void bnxt_free_cp_ring(struct bnxt *bp,
-                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
+static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+                               unsigned int idx __rte_unused)
 {
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
@@ -1293,8 +1405,10 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
-               if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+               if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, idx);
+                       cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+               }
        }
 
        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
@@ -1316,17 +1430,26 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
+                       memset(rxr->ag_buf_ring, 0,
+                                       rxr->ag_ring_struct->ring_size *
+                                       sizeof(*rxr->ag_buf_ring));
+                       rxr->ag_prod = 0;
                }
-               if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+               if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, idx);
+                       bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+                       cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+               }
        }
 
        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
 
-               if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+               if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, 0);
+                       cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+               }
        }
 
        return rc;
@@ -1338,14 +1461,7 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
        uint32_t rc = 0;
 
        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
-               unsigned int idx = i + 1;
-
-               if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
-                   bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
-                       continue;
-
-               rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
-
+               rc = bnxt_hwrm_ring_grp_alloc(bp, i);
                if (rc)
                        return rc;
        }
@@ -1404,13 +1520,25 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        int rc = 0;
 
        STAILQ_FOREACH(filter, &vnic->filter, next) {
-               rc = bnxt_hwrm_set_filter(bp, vnic, filter);
+               rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
                if (rc)
                        break;
        }
        return rc;
 }
 
+void bnxt_free_tunnel_ports(struct bnxt *bp)
+{
+       if (bp->vxlan_port_cnt)
+               bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
+                       HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
+       bp->vxlan_port = 0;
+       if (bp->geneve_port_cnt)
+               bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
+                       HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
+       bp->geneve_port = 0;
+}
+
 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 {
        struct bnxt_vnic_info *vnic;
@@ -1430,12 +1558,16 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
                bnxt_clear_hwrm_vnic_filters(bp, vnic);
 
                bnxt_hwrm_vnic_ctx_free(bp, vnic);
+
+               bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
+
                bnxt_hwrm_vnic_free(bp, vnic);
        }
        /* Ring resources */
        bnxt_free_all_hwrm_rings(bp);
        bnxt_free_all_hwrm_ring_grps(bp);
        bnxt_free_all_hwrm_stat_ctxs(bp);
+       bnxt_free_tunnel_ports(bp);
 }
 
 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
@@ -2036,6 +2168,49 @@ error_free:
        return rc;
 }
 
+int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
+                               uint8_t tunnel_type)
+{
+       struct hwrm_tunnel_dst_port_alloc_input req = {0};
+       struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc = 0;
+
+       HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
+       req.tunnel_type = tunnel_type;
+       req.tunnel_dst_port_val = port;
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       switch (tunnel_type) {
+       case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
+               bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+               bp->vxlan_port = port;
+               break;
+       case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
+               bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
+               bp->geneve_port = port;
+               break;
+       default:
+               break;
+       }
+       return rc;
+}
+
+int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
+                               uint8_t tunnel_type)
+{
+       struct hwrm_tunnel_dst_port_free_input req = {0};
+       struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc = 0;
+
+       HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
+       req.tunnel_type = tunnel_type;
+       req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
 
 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 {
@@ -2115,6 +2290,37 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
        return rc;
 }
 
+int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       uint16_t dflt_vlan, fid;
+       uint32_t func_cfg_flags;
+       int rc = 0;
+
+       HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+       if (is_vf) {
+               dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
+               fid = bp->pf.vf_info[vf].fid;
+               func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
+       } else {
+               fid = 0xffff;
+               func_cfg_flags = bp->pf.func_cfg_flags;
+               dflt_vlan = bp->vlan;
+       }
+
+       req.flags = rte_cpu_to_le_32(func_cfg_flags);
+       req.fid = rte_cpu_to_le_16(fid);
+       req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
+       req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
                              void *encaped, size_t ec_size)
 {
@@ -2175,3 +2381,75 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
 
        return rc;
 }
+
+int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
+                        struct rte_eth_stats *stats)
+{
+       int rc = 0;
+       struct hwrm_stat_ctx_query_input req = {.req_type = 0};
+       struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
+
+       HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
+
+       req.stat_ctx_id = rte_cpu_to_le_32(cid);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT;
+
+       stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+       stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+       stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+       stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+       stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+       stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+
+       stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+       stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+       stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+       stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+       stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+       stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+
+       stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
+       stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
+       stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+
+       return rc;
+}
+
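
A hypothetical per-queue loop from the stats_get path; note that idx
indexes the fixed-size q_ipackets[]/q_opackets[] arrays, so it must
stay below RTE_ETHDEV_QUEUE_STAT_CNTRS:

    unsigned int i;

    for (i = 0; i < bp->rx_cp_nr_rings; i++) {
            struct bnxt_cp_ring_info *cpr = bp->rx_queues[i]->cp_ring;

            rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
                                      stats);
            if (rc)
                    break;
    }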
+int bnxt_hwrm_port_qstats(struct bnxt *bp)
+{
+       struct hwrm_port_qstats_input req = {0};
+       struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+       struct bnxt_pf_info *pf = &bp->pf;
+       int rc;
+
+       if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+               return 0;
+
+       HWRM_PREP(req, PORT_QSTATS, -1, resp);
+       req.port_id = rte_cpu_to_le_16(pf->port_id);
+       req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
+       req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+       return rc;
+}
+
+int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
+{
+       struct hwrm_port_clr_stats_input req = {0};
+       struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+       struct bnxt_pf_info *pf = &bp->pf;
+       int rc;
+
+       if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+               return 0;
+
+       HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
+       req.port_id = rte_cpu_to_le_16(pf->port_id);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+       return rc;
+}
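
Finally, a hedged sketch of how the port-level refresh slots in
(BNXT_FLAG_PORT_STATS is assumed to be set at init time when the
firmware advertises port counters, so the call degrades to a no-op
otherwise):

    /* DMA the latest port counters into the buffers mapped via
     * hw_rx/tx_port_stats_map before reading them out; PF only.
     */
    if (BNXT_PF(bp))
            rc = bnxt_hwrm_port_qstats(bp);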