diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 7c7c9ce..6d98d5a 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -139,7 +139,7 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
        }
 
        if (i >= HWRM_CMD_TIMEOUT) {
-               RTE_LOG(ERR, PMD, "Error sending msg %x\n",
+               RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
@@ -176,7 +176,22 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
-                       RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
+                       if (resp->resp_len >= 16) { \
+                               struct hwrm_err_output *tmp_hwrm_err_op = \
+                                                       (void *)resp; \
+                               RTE_LOG(ERR, PMD, \
+                                       "%s error %d:%d:%08x:%04x\n", \
+                                       __func__, \
+                                       rc, tmp_hwrm_err_op->cmd_err, \
+                                       rte_le_to_cpu_32(\
+                                               tmp_hwrm_err_op->opaque_0), \
+                                       rte_le_to_cpu_16(\
+                                               tmp_hwrm_err_op->opaque_1)); \
+                       } else { \
+                               RTE_LOG(ERR, PMD, \
+                                       "%s error %d\n", __func__, rc); \
+                       } \
                        return rc; \
                } \
        }
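
The macro change above teaches HWRM_CHECK_RESULT to decode the richer hwrm_err_output layout whenever the firmware returns at least 16 bytes of response. As a reading aid, here is the same decode written as a standalone helper with explicit endianness conversions; this is a sketch for illustration only (the helper name is invented, and the hwrm_err_output layout is assumed from hsi_struct_def_dpdk.h), not code from the patch:

    /* Illustrative only: mirrors the decode done by HWRM_CHECK_RESULT. */
    static void bnxt_log_hwrm_error(const char *func, void *resp_buf)
    {
            struct hwrm_err_output *err = resp_buf;
            uint16_t rc = rte_le_to_cpu_16(err->error_code);

            if (rte_le_to_cpu_16(err->resp_len) >= 16)
                    RTE_LOG(ERR, PMD, "%s error %d:%d:%08x:%04x\n",
                            func, rc, err->cmd_err,
                            rte_le_to_cpu_32(err->opaque_0),
                            rte_le_to_cpu_16(err->opaque_1));
            else
                    RTE_LOG(ERR, PMD, "%s error %d\n", func, rc);
    }
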
@@ -198,7 +213,10 @@ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        return rc;
 }
 
-int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
+                                struct bnxt_vnic_info *vnic,
+                                uint16_t vlan_count,
+                                struct bnxt_vlan_table_entry *vlan_table)
 {
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
@@ -211,10 +229,27 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        /* FIXME add multicast flag, when multicast adding options are
         * supported by ethtool.
         */
+       if (vnic->flags & BNXT_VNIC_INFO_BCAST)
+               mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
+       if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
-               mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
-               mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+       if (vnic->flags & BNXT_VNIC_INFO_MCAST)
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
+       if (vnic->mc_addr_cnt) {
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
+               req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
+               req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
+       }
+       if (vlan_count && vlan_table) {
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
+               req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
+                        rte_mem_virt2phy(vlan_table));
+               req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
+       }
-       req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
-                                   mask);
+       req.mask = rte_cpu_to_le_32(mask);
 
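
With the widened signature, callers can hand the firmware an optional VLAN allow-list. A hedged usage sketch follows (the bnxt_vlan_table_entry field names are assumed from bnxt.h, and the table must stay DMA-reachable, hence the page lock):

    /* Illustrative only: restrict a VNIC to VLANs 100 and 200. */
    static int example_rx_mask_vlans(struct bnxt *bp,
                                     struct bnxt_vnic_info *vnic)
    {
            struct bnxt_vlan_table_entry *tbl;
            int rc;

            tbl = rte_malloc("vlan_tbl", 2 * sizeof(*tbl),
                             RTE_CACHE_LINE_SIZE);
            if (tbl == NULL)
                    return -ENOMEM;
            rte_mem_lock_page(tbl);         /* keep it DMA-reachable */
            tbl[0].tpid = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
            tbl[0].vid = rte_cpu_to_be_16(100);
            tbl[1].tpid = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
            tbl[1].vid = rte_cpu_to_be_16(200);

            rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 2, tbl);
            rte_free(tbl);
            return rc;
    }
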
@@ -246,7 +281,7 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
 }
 
 int bnxt_hwrm_set_filter(struct bnxt *bp,
-                        struct bnxt_vnic_info *vnic,
+                        uint16_t dst_id,
                         struct bnxt_filter_info *filter)
 {
        int rc = 0;
@@ -260,7 +295,7 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
 
        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
-       req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+       req.dst_id = rte_cpu_to_le_16(dst_id);
 
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
@@ -276,6 +311,10 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
+       if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
+               req.src_id = rte_cpu_to_le_32(filter->src_id);
+       if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
+               req.src_type = filter->src_type;
 
        req.enables = rte_cpu_to_le_32(enables);
 
@@ -489,8 +528,15 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                        rc = -ENOMEM;
                        goto error;
                }
+               rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
-                       rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+                       rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+               if (bp->hwrm_cmd_resp_dma_addr == 0) {
+                       RTE_LOG(ERR, PMD,
+                       "Unable to map response buffer to physical memory.\n");
+                       rc = -ENOMEM;
+                       goto error;
+               }
                bp->max_resp_len = max_resp_len;
        }
 
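
Both this hunk and the matching one in bnxt_alloc_hwrm_resources() below follow the same pattern: rte_malloc(), pin the page, then translate to a physical address for the firmware. A condensed sketch of that pattern (the helper name is invented; the failure value of rte_mem_virt2phy() is checked against 0 the same way the patch does):

    /* Illustrative only: allocate a DMA-visible buffer for HWRM use. */
    static void *hwrm_dma_alloc(const char *type, size_t len,
                                phys_addr_t *pa)
    {
            void *va = rte_malloc(type, len, 0);

            if (va == NULL)
                    return NULL;
            rte_mem_lock_page(va);          /* keep the page resident */
            *pa = rte_mem_virt2phy(va);
            if (*pa == 0) {                 /* translation failed */
                    rte_free(va);
                    return NULL;
            }
            return va;
    }
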
@@ -638,20 +684,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
-                        uint32_t stats_ctx_id)
+                        uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
 {
        int rc = 0;
+       uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
        HWRM_PREP(req, RING_ALLOC, -1, resp);
 
-       req.enables = rte_cpu_to_le_32(0);
-
        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
+       req.length = rte_cpu_to_le_32(ring->ring_size);
 
        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
@@ -659,12 +705,11 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
-               req.cmpl_ring_id =
-                   rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
-               req.length = rte_cpu_to_le_32(ring->ring_size);
+               req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
-               req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
-                       HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
+               if (stats_ctx_id != INVALID_STATS_CTX_ID)
+                       enables |=
+                       HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
@@ -673,13 +718,13 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
-               req.length = rte_cpu_to_le_32(ring->ring_size);
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }
+       req.enables = rte_cpu_to_le_32(enables);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -795,13 +840,12 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
 
-       HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
-
        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;
 
+       HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
+
        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
-       req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -810,8 +854,8 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
        return rc;
 }
 
-int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
-                            struct bnxt_cp_ring_info *cpr, unsigned int idx)
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+                               unsigned int idx __rte_unused)
 {
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
@@ -819,9 +863,8 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
 
        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
 
-       req.update_period_ms = rte_cpu_to_le_32(1000);
+       req.update_period_ms = rte_cpu_to_le_32(0);
 
-       req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);
 
@@ -830,13 +873,12 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
        HWRM_CHECK_RESULT;
 
        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
-       bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
 
        return rc;
 }
 
-int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
-                           struct bnxt_cp_ring_info *cpr, unsigned int idx)
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+                               unsigned int idx __rte_unused)
 {
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
@@ -845,15 +887,11 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
 
        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
-       req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
        HWRM_CHECK_RESULT;
 
-       cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
-       bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
-
        return rc;
 }
 
@@ -864,15 +902,10 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
        /* map ring groups to this vnic */
-       for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
-               if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
-                       RTE_LOG(ERR, PMD,
-                               "Not enough ring groups avail:%x req:%x\n", j,
-                               (vnic->end_grp_id - vnic->start_grp_id) + 1);
-                       break;
-               }
+       RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
+               vnic->start_grp_id, vnic->end_grp_id);
+       for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
-       }
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
@@ -1124,6 +1157,70 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
        return rc;
 }
 
+int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
+                       struct bnxt_vnic_info *vnic)
+{
+       int rc = 0;
+       struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
+       struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       uint16_t size;
+
+       HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
+
+       req.flags = rte_cpu_to_le_32(
+                       HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
+
+       req.enables = rte_cpu_to_le_32(
+               HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
+
+       size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+       size -= RTE_PKTMBUF_HEADROOM;
+
+       req.jumbo_thresh = rte_cpu_to_le_16(size);
+       req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+                       struct bnxt_vnic_info *vnic, bool enable)
+{
+       int rc = 0;
+       struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
+       struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+       HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
+
+       if (enable) {
+               req.enables = rte_cpu_to_le_32(
+                               HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
+                               HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
+                               HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
+               req.flags = rte_cpu_to_le_32(
+                               HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
+                               HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
+                               HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
+                               HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
+                               HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
+                       HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
+               req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+               req.max_agg_segs = rte_cpu_to_le_16(5);
+               req.max_aggs =
+                       rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
+               req.min_agg_len = rte_cpu_to_le_32(512);
+       }
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
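+
+A note on how these two helpers are expected to be driven (an assumption
+about the ethdev wiring, which is not part of this patch): the jumbo
+threshold follows the mbuf data room, and TPA tracks the LRO offload flag:
+
+    /* Illustrative only: enable TPA iff the application asked for LRO. */
+    bool lro = !!dev->data->dev_conf.rxmode.enable_lro;
+
+    rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+    if (rc == 0)
+            rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, lro);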
 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
 {
        struct hwrm_func_cfg_input req = {0};
@@ -1146,6 +1243,81 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
        return rc;
 }
 
+int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
+                                 uint64_t *dropped)
+{
+       int rc = 0;
+       struct hwrm_func_qstats_input req = {.req_type = 0};
+       struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+       HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+
+       req.fid = rte_cpu_to_le_16(fid);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT;
+
+       if (dropped)
+               *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
+
+       return rc;
+}
+
+int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
+                         struct rte_eth_stats *stats)
+{
+       int rc = 0;
+       struct hwrm_func_qstats_input req = {.req_type = 0};
+       struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+       HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+
+       req.fid = rte_cpu_to_le_16(fid);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT;
+
+       stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+       stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+       stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+       stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+       stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+       stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+
+       stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+       stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+       stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+       stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+       stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+       stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+
+       stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
+       stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
+
+       stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
+
+       return rc;
+}
+
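+
+Taken together, the two query helpers cover both halves of rte_eth_stats:
+everything except Tx drops comes from one FUNC_QSTATS call, and the drop
+counter can be fetched separately. A hedged sketch of a caller (fid 0xffff
+is used elsewhere in this file to address the calling function):
+
+    /* Illustrative only: gather function-wide stats plus Tx drops. */
+    struct rte_eth_stats stats = { 0 };
+    uint64_t tx_dropped = 0;
+    int rc;
+
+    rc = bnxt_hwrm_func_qstats(bp, 0xffff, &stats);
+    if (rc == 0)
+            rc = bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_dropped);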
+int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
+{
+       int rc = 0;
+       struct hwrm_func_clr_stats_input req = {.req_type = 0};
+       struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+
+       HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
+
+       req.fid = rte_cpu_to_le_16(fid);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
 /*
  * HWRM utility functions
  */
@@ -1182,14 +1354,20 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
        struct bnxt_cp_ring_info *cpr;
 
        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
-               unsigned int idx = i + 1;
 
                if (i >= bp->rx_cp_nr_rings)
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                else
                        cpr = bp->rx_queues[i]->cp_ring;
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
-                       rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
+                       rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
+                       cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+                       /*
+                        * TODO. Need a better way to reset grp_info.stats_ctx
+                        * for Rx rings only. stats_ctx is not saved for Tx
+                        * in grp_info.
+                        */
+                       bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
                        if (rc)
                                return rc;
                }
@@ -1206,7 +1384,6 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;
-               unsigned int idx = i + 1;
 
                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
@@ -1216,7 +1393,7 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
                        cpr = rxq->cp_ring;
                }
 
-               rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
+               rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
 
                if (rc)
                        return rc;
@@ -1226,11 +1403,10 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
 
 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 {
-       uint16_t i;
+       uint16_t idx;
        uint32_t rc = 0;
 
-       for (i = 0; i < bp->rx_cp_nr_rings; i++) {
-               unsigned int idx = i + 1;
+       for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
@@ -1247,8 +1423,8 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
        return rc;
 }
 
-static void bnxt_free_cp_ring(struct bnxt *bp,
-                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
+static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+                               unsigned int idx __rte_unused)
 {
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
@@ -1286,8 +1462,10 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
-               if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+               if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, idx);
+                       cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+               }
        }
 
        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
@@ -1309,17 +1487,26 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
+                       memset(rxr->ag_buf_ring, 0,
+                                       rxr->ag_ring_struct->ring_size *
+                                       sizeof(*rxr->ag_buf_ring));
+                       rxr->ag_prod = 0;
                }
-               if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+               if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, idx);
+                       bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+                       cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+               }
        }
 
        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
 
-               if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+               if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, 0);
+                       cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+               }
        }
 
        return rc;
@@ -1331,14 +1518,7 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
        uint32_t rc = 0;
 
        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
-               unsigned int idx = i + 1;
-
-               if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
-                   bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
-                       continue;
-
-               rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
-
+               rc = bnxt_hwrm_ring_grp_alloc(bp, i);
                if (rc)
                        return rc;
        }
@@ -1363,10 +1543,16 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
+       rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_dma_addr =
-               rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+               rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+       if (bp->hwrm_cmd_resp_dma_addr == 0) {
+               RTE_LOG(ERR, PMD,
+                       "unable to map response address to physical memory\n");
+               return -ENOMEM;
+       }
        rte_spinlock_init(&bp->hwrm_lock);
 
        return 0;
@@ -1391,13 +1577,25 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        int rc = 0;
 
        STAILQ_FOREACH(filter, &vnic->filter, next) {
-               rc = bnxt_hwrm_set_filter(bp, vnic, filter);
+               rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
                if (rc)
                        break;
        }
        return rc;
 }
 
+void bnxt_free_tunnel_ports(struct bnxt *bp)
+{
+       if (bp->vxlan_port_cnt)
+               bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
+                       HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
+       bp->vxlan_port = 0;
+       if (bp->geneve_port_cnt)
+               bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
+                       HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
+       bp->geneve_port = 0;
+}
+
 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 {
        struct bnxt_vnic_info *vnic;
@@ -1417,12 +1615,16 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
                bnxt_clear_hwrm_vnic_filters(bp, vnic);
 
                bnxt_hwrm_vnic_ctx_free(bp, vnic);
+
+               bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
+
                bnxt_hwrm_vnic_free(bp, vnic);
        }
        /* Ring resources */
        bnxt_free_all_hwrm_rings(bp);
        bnxt_free_all_hwrm_ring_grps(bp);
        bnxt_free_all_hwrm_stat_ctxs(bp);
+       bnxt_free_tunnel_ports(bp);
 }
 
 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
@@ -1873,6 +2075,27 @@ static void reserve_resources_from_vf(struct bnxt *bp,
        bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
 }
 
+int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
+{
+       struct hwrm_func_qcfg_input req = {0};
+       struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       /* Query the VF's current default VLAN */
+       HWRM_PREP(req, FUNC_QCFG, -1, resp);
+       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       if (rc) {
+               RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
+               return -1;
+       } else if (resp->error_code) {
+               rc = rte_le_to_cpu_16(resp->error_code);
+               RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
+               return -1;
+       }
+       return rte_le_to_cpu_16(resp->vlan);
+}
+
 static int update_pf_resource_max(struct bnxt *bp)
 {
        struct hwrm_func_qcfg_input req = {0};
@@ -2023,6 +2246,94 @@ error_free:
        return rc;
 }
 
+int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+       req.fid = rte_cpu_to_le_16(0xffff);
+       req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
+       req.evb_mode = bp->pf.evb_mode;
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
+int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
+                               uint8_t tunnel_type)
+{
+       struct hwrm_tunnel_dst_port_alloc_input req = {0};
+       struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc = 0;
+
+       HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
+       req.tunnel_type = tunnel_type;
+       req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       switch (tunnel_type) {
+       case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
+               bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+               bp->vxlan_port = port;
+               break;
+       case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
+               bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
+               bp->geneve_port = port;
+               break;
+       default:
+               break;
+       }
+       return rc;
+}
+
+int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
+                               uint8_t tunnel_type)
+{
+       struct hwrm_tunnel_dst_port_free_input req = {0};
+       struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc = 0;
+
+       HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
+       req.tunnel_type = tunnel_type;
+       req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
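+
+These two wrappers are the natural backing for the ethdev UDP tunnel
+callbacks. A sketch of the add path (assumed wiring; the real callback in
+bnxt_ethdev.c would additionally refuse duplicate ports and maintain the
+per-type counters that bnxt_free_tunnel_ports() checks):
+
+    /* Illustrative only: udp_tunnel_port_add backed by the HWRM call. */
+    static int example_udp_tunnel_add(struct rte_eth_dev *dev,
+                                      struct rte_eth_udp_tunnel *t)
+    {
+            struct bnxt *bp = dev->data->dev_private;
+
+            if (t->prot_type != RTE_TUNNEL_TYPE_VXLAN)
+                    return -ENOTSUP;
+            return bnxt_hwrm_tunnel_dst_port_alloc(bp, t->udp_port,
+                    HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
+    }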
+int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf)
+{
+       struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_func_cfg_input req = {0};
+       int rc;
+
+       HWRM_PREP(req, FUNC_CFG, -1, resp);
+       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
+void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
+{
+       uint32_t *flag = flagp;
+
+       vnic->flags = *flag;
+}
+
+int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+       return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
 
 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 {
@@ -2102,6 +2413,73 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
        return rc;
 }
 
+int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       uint16_t dflt_vlan, fid;
+       uint32_t func_cfg_flags;
+       int rc = 0;
+
+       HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+       if (is_vf) {
+               dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
+               fid = bp->pf.vf_info[vf].fid;
+               func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
+       } else {
+               fid = rte_cpu_to_le_16(0xffff);
+               func_cfg_flags = bp->pf.func_cfg_flags;
+               dflt_vlan = bp->vlan;
+       }
+
+       req.flags = rte_cpu_to_le_32(func_cfg_flags);
+       req.fid = rte_cpu_to_le_16(fid);
+       req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
+       req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
+int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
+                       uint16_t max_bw, uint16_t enables)
+{
+       struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_func_cfg_input req = {0};
+       int rc;
+
+       HWRM_PREP(req, FUNC_CFG, -1, resp);
+       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       req.enables |= rte_cpu_to_le_32(enables);
+       req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+       req.max_bw = rte_cpu_to_le_32(max_bw);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
+int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc = 0;
+
+       HWRM_PREP(req, FUNC_CFG, -1, resp);
+       req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
+       req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
                              void *encaped, size_t ec_size)
 {
@@ -2162,3 +2540,326 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
 
        return rc;
 }
+
+int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
+                        struct rte_eth_stats *stats)
+{
+       int rc = 0;
+       struct hwrm_stat_ctx_query_input req = {.req_type = 0};
+       struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
+
+       HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
+
+       req.stat_ctx_id = rte_cpu_to_le_32(cid);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT;
+
+       stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+       stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+       stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+       stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+       stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+       stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+
+       stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+       stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+       stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+       stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+       stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+       stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+
+       stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
+       stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
+       stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+
+       return rc;
+}
+
+int bnxt_hwrm_port_qstats(struct bnxt *bp)
+{
+       struct hwrm_port_qstats_input req = {0};
+       struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+       struct bnxt_pf_info *pf = &bp->pf;
+       int rc;
+
+       if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+               return 0;
+
+       HWRM_PREP(req, PORT_QSTATS, -1, resp);
+       req.port_id = rte_cpu_to_le_16(pf->port_id);
+       req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
+       req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+       return rc;
+}
+
+int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
+{
+       struct hwrm_port_clr_stats_input req = {0};
+       struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+       struct bnxt_pf_info *pf = &bp->pf;
+       int rc;
+
+       if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+               return 0;
+
+       HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
+       req.port_id = rte_cpu_to_le_16(pf->port_id);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+       return rc;
+}
+
+int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
+{
+       struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_port_led_qcaps_input req = {0};
+       int rc;
+
+       if (BNXT_VF(bp))
+               return 0;
+
+       HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
+       req.port_id = bp->pf.port_id;
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
+               unsigned int i;
+
+               bp->num_leds = resp->num_leds;
+               memcpy(bp->leds, &resp->led0_id,
+                       sizeof(bp->leds[0]) * bp->num_leds);
+               for (i = 0; i < bp->num_leds; i++) {
+                       struct bnxt_led_info *led = &bp->leds[i];
+                       uint16_t caps = led->led_state_caps;
+
+                       if (!led->led_group_id ||
+                               !BNXT_LED_ALT_BLINK_CAP(caps)) {
+                               bp->num_leds = 0;
+                               break;
+                       }
+               }
+       }
+       return rc;
+}
+
+int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
+{
+       struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_port_led_cfg_input req = {0};
+       struct bnxt_led_cfg *led_cfg;
+       uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
+       uint16_t duration = 0;
+       int rc, i;
+
+       if (!bp->num_leds || BNXT_VF(bp))
+               return -EOPNOTSUPP;
+
+       HWRM_PREP(req, PORT_LED_CFG, -1, resp);
+       if (led_on) {
+               led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
+               duration = rte_cpu_to_le_16(500);
+       }
+       req.port_id = bp->pf.port_id;
+       req.num_leds = bp->num_leds;
+       led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
+       for (i = 0; i < bp->num_leds; i++, led_cfg++) {
+               req.enables |= BNXT_LED_DFLT_ENABLES(i);
+               led_cfg->led_id = bp->leds[i].led_id;
+               led_cfg->led_state = led_state;
+               led_cfg->led_blink_on = duration;
+               led_cfg->led_blink_off = duration;
+               led_cfg->led_group_id = bp->leds[i].led_group_id;
+       }
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
+static void bnxt_vnic_count(struct bnxt_vnic_info *vnic, void *cbdata)
+{
+       uint32_t *count = cbdata;
+
+       if (vnic->func_default)
+               *count = *count + 1;
+}
+
+static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
+                                    struct bnxt_vnic_info *vnic __rte_unused)
+{
+       return 0;
+}
+
+int bnxt_vf_default_vnic_count(struct bnxt *bp, uint16_t vf)
+{
+       uint32_t count = 0;
+
+       bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
+           &count, bnxt_vnic_count_hwrm_stub);
+
+       return count;
+}
+
+static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
+                                       uint16_t *vnic_ids)
+{
+       struct hwrm_func_vf_vnic_ids_query_input req = {0};
+       struct hwrm_func_vf_vnic_ids_query_output *resp =
+                                               bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       /* First query all VNIC ids */
+       HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp);
+
+       req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
+       req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
+       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+
+       if (req.vnic_id_tbl_addr == 0) {
+               RTE_LOG(ERR, PMD,
+               "unable to map VNIC ID table address to physical memory\n");
+               return -ENOMEM;
+       }
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       if (rc) {
+               RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
+               return -1;
+       } else if (resp->error_code) {
+               rc = rte_le_to_cpu_16(resp->error_code);
+               RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
+               return -1;
+       }
+
+       return rte_le_to_cpu_32(resp->vnic_id_cnt);
+}
+
+/*
+ * This function queries the VNIC IDs for a specified VF. It then calls
+ * vnic_cb to update the necessary field in vnic_info with cbdata.
+ * Finally it calls hwrm_cb to program the new VNIC configuration.
+ */
+int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
+       void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
+       int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
+{
+       struct bnxt_vnic_info vnic;
+       int rc = 0;
+       int i, num_vnic_ids;
+       uint16_t *vnic_ids;
+       size_t vnic_id_sz;
+       size_t sz;
+
+       /* First query all VNIC ids */
+       vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+       vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
+                       RTE_CACHE_LINE_SIZE);
+       if (vnic_ids == NULL) {
+               rc = -ENOMEM;
+               return rc;
+       }
+       for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
+               rte_mem_lock_page(((char *)vnic_ids) + sz);
+
+       num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
+
+       if (num_vnic_ids < 0) {
+               rte_free(vnic_ids);
+               return num_vnic_ids;
+       }
+
+       /* Retrieve each VNIC, update it via the callback, then program it */
+
+       for (i = 0; i < num_vnic_ids; i++) {
+               memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
+               vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
+               rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
+               if (rc)
+                       break;
+               if (vnic.mru == 4)      /* Indicates unallocated */
+                       continue;
+
+               vnic_cb(&vnic, cbdata);
+
+               rc = hwrm_cb(bp, &vnic);
+               if (rc)
+                       break;
+       }
+
+       rte_free(vnic_ids);
+
+       return rc;
+}
+
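+
+The callback pair added earlier in this patch (vf_vnic_set_rxmask_cb plus
+bnxt_set_rx_mask_no_vlan) is one ready-made user of this iterator. A hedged
+example that rewrites the Rx mask on every VNIC of one VF (the VF index and
+flag combination are illustrative):
+
+    /* Illustrative only: reprogram the Rx mask on all VNICs of VF 3. */
+    uint32_t flags = BNXT_VNIC_INFO_BCAST | BNXT_VNIC_INFO_UNTAGGED;
+    int rc;
+
+    rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, 3,
+                    vf_vnic_set_rxmask_cb, &flags,
+                    bnxt_set_rx_mask_no_vlan);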
+int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
+                                             bool on)
+{
+       struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_func_cfg_input req = {0};
+       int rc;
+
+       HWRM_PREP(req, FUNC_CFG, -1, resp);
+       req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+       req.enables |= rte_cpu_to_le_32(
+                       HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
+       req.vlan_antispoof_mode = on ?
+               HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
+               HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
+int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
+{
+       struct bnxt_vnic_info vnic;
+       uint16_t *vnic_ids;
+       size_t vnic_id_sz;
+       int num_vnic_ids, i;
+       size_t sz;
+       int rc;
+
+       vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+       vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
+                       RTE_CACHE_LINE_SIZE);
+       if (vnic_ids == NULL) {
+               rc = -ENOMEM;
+               return rc;
+       }
+
+       for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
+               rte_mem_lock_page(((char *)vnic_ids) + sz);
+
+       rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
+       if (rc <= 0)
+               goto exit;
+       num_vnic_ids = rc;
+
+       /*
+        * Loop through to find the default VNIC ID.
+        * TODO: The easier way would be to obtain the resp->dflt_vnic_id
+        * by sending the hwrm_func_qcfg command to the firmware.
+        */
+       for (i = 0; i < num_vnic_ids; i++) {
+               memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
+               vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
+               rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
+                                       bp->pf.first_vf_id + vf);
+               if (rc)
+                       goto exit;
+               if (vnic.func_default) {
+                       rte_free(vnic_ids);
+                       return vnic.fw_vnic_id;
+               }
+       }
+       /* Could not find a default VNIC. */
+       RTE_LOG(ERR, PMD, "No default VNIC\n");
+exit:
+       rte_free(vnic_ids);
+       return -1;
+}