+ if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
+ return rc;
+
+ HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
+
+ req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
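+/* Allocate a firmware statistics context for a completion ring; firmware
+ * DMAs the ring's counters into the buffer at hw_stats_map.
+ */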
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ unsigned int idx __rte_unused)
+{
+ int rc;
+ struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
+ struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
+
+ req.update_period_ms = rte_cpu_to_le_32(0);
+
+ req.stats_dma_addr =
+ rte_cpu_to_le_64(cpr->hw_stats_map);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
+
+ return rc;
+}
+
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ unsigned int idx __rte_unused)
+{
+ int rc;
+ struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
+ struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
+
+ req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
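+/* Allocate a VNIC and map the ring groups in [start_grp_id, end_grp_id]
+ * to it; the RSS/CoS/LB rules start out unset (HWRM_NA_SIGNATURE).
+ */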
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0, i, j;
+ struct hwrm_vnic_alloc_input req = { 0 };
+ struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ /* map ring groups to this vnic */
+ RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
+ vnic->start_grp_id, vnic->end_grp_id);
+ for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
+ vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
+ vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
+ vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE;
+ HWRM_PREP(req, VNIC_ALLOC, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
+ return rc;
+}
+
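+/* Capture a VNIC's current buffer placement settings so they can be
+ * restored after a VNIC_CFG call.
+ */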
+static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ struct bnxt_plcmodes_cfg *pmode)
+{
+ int rc = 0;
+ struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);
+
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ pmode->flags = rte_le_to_cpu_32(resp->flags);
+ /* dflt_vnic bit doesn't exist in the _cfg command */
+ pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
+ pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
+ pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
+ pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
+
+ return rc;
+}
+
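+/* Write a previously captured set of placement settings back to the VNIC. */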
+static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ struct bnxt_plcmodes_cfg *pmode)
+{
+ int rc = 0;
+ struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
+
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.flags = rte_cpu_to_le_32(pmode->flags);
+ req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
+ req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
+ req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
+ );
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
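+/* Configure the VNIC's default ring group, MRU, rule ids and mode flags.
+ * Placement settings are saved beforehand and restored afterwards, since
+ * VNIC_CFG may reset them.
+ */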
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t ctx_enable_flag = 0;
+ struct bnxt_plcmodes_cfg pmodes;
+
+ rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
+ if (rc)
+ return rc;
+
+ HWRM_PREP(req, VNIC_CFG, -1, resp);
+
+ /* Only RSS is supported for now; CoS and LB rules are TBD */
+ req.enables =
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
+ HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
+ if (vnic->lb_rule != (uint16_t)HWRM_NA_SIGNATURE)
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
+ if (vnic->cos_rule != (uint16_t)HWRM_NA_SIGNATURE)
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
+ if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE)
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
+ req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
+ req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
+ req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
+ req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
+ req.mru = rte_cpu_to_le_16(vnic->mru);
+ if (vnic->func_default)
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
+ if (vnic->vlan_strip)
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
+ if (vnic->bd_stall)
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
+ if (vnic->roce_dual)
+ req.flags |= rte_cpu_to_le_32(
+ HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
+ if (vnic->roce_only)
+ req.flags |= rte_cpu_to_le_32(
+ HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
+ if (vnic->rss_dflt_cr)
+ req.flags |= rte_cpu_to_le_32(
+ HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int16_t fw_vf_id)
+{
+ int rc = 0;
+ struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_QCFG, -1, resp);
+
+ req.enables =
+ rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.vf_id = rte_cpu_to_le_16(fw_vf_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
+ vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
+ vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
+ vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
+ vnic->mru = rte_le_to_cpu_16(resp->mru);
+ vnic->func_default = rte_le_to_cpu_32(
+ resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
+ vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
+ vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
+ vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
+ vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
+ vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
+
+ return rc;
+}
+
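+/* Allocate an RSS/CoS/LB context; its id is stored as the VNIC's RSS rule. */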
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
+ struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
+
+ req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ vnic->rss_rule = INVALID_HW_RING_ID;
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_free_input req = {.req_type = 0 };
+ struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ return rc;
+
+ HWRM_PREP(req, VNIC_FREE, -1, resp);
+
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ return rc;
+}
+
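+/* Program the RSS hash type and hand firmware the DMA addresses of the
+ * indirection table and hash key.
+ */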
+int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
+
+ req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+
+ req.ring_grp_tbl_addr =
+ rte_cpu_to_le_64(vnic->rss_table_dma_addr);
+ req.hash_key_tbl_addr =
+ rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+ req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
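+/* Set the jumbo placement threshold to the per-mbuf data room (minus
+ * headroom), so frames that do not fit a single mbuf use the
+ * aggregation ring.
+ */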
+int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t size;
+
+ HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
+
+ req.flags = rte_cpu_to_le_32(
+ HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
+
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
+
+ size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+ size -= RTE_PKTMBUF_HEADROOM;
+
+ req.jumbo_thresh = rte_cpu_to_le_16(size);
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
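+/* Enable or disable TPA (hardware receive aggregation) on a VNIC; when
+ * disabling, the otherwise empty request clears the feature. The segment
+ * and length limits below are driver-chosen defaults.
+ */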
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool enable)
+{
+ int rc = 0;
+ struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
+
+ if (enable) {
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
+ req.flags = rte_cpu_to_le_32(
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.max_agg_segs = rte_cpu_to_le_16(5);
+ req.max_aggs =
+ rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
+ req.min_agg_len = rte_cpu_to_le_32(512);
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
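+/* Set a VF's default MAC address via FUNC_CFG issued by the PF. */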
+int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ bp->pf.vf_info[vf].random_mac = false;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
+ uint64_t *dropped)
+{
+ int rc = 0;
+ struct hwrm_func_qstats_input req = {.req_type = 0};
+ struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+
+ req.fid = rte_cpu_to_le_16(fid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ if (dropped)
+ *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
+
+ return rc;
+}
+
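+/* Read per-function counters from firmware and fold them into
+ * rte_eth_stats.
+ */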
+int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
+ struct rte_eth_stats *stats)
+{
+ int rc = 0;
+ struct hwrm_func_qstats_input req = {.req_type = 0};
+ struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+
+ req.fid = rte_cpu_to_le_16(fid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+ stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+ stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+ stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+ stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+ stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+
+ stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+ stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+ stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+ stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+ stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+ stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+
+ stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
+ stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
+
+ stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
+
+ return rc;
+}
+
+int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
+{
+ int rc = 0;
+ struct hwrm_func_clr_stats_input req = {.req_type = 0};
+ struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
+
+ req.fid = rte_cpu_to_le_16(fid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+/*
+ * HWRM utility functions
+ */
+
+int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
+{
+ unsigned int i;
+ int rc = 0;
+
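+ /* Completion rings are indexed with all Rx rings first, then Tx */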
+ for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq;
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+
+ if (i >= bp->rx_cp_nr_rings) {
+ txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
+ cpr = txq->cp_ring;
+ } else {
+ rxq = bp->rx_queues[i];
+ cpr = rxq->cp_ring;
+ }
+
+ rc = bnxt_hwrm_stat_clear(bp, cpr);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
+{
+ int rc;
+ unsigned int i;
+ struct bnxt_cp_ring_info *cpr;
+
+ for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+
+ if (i >= bp->rx_cp_nr_rings)
+ cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
+ else
+ cpr = bp->rx_queues[i]->cp_ring;
+ if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
+ rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
+ cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+ /*
+ * TODO: find a better way to reset grp_info.stats_ctx
+ * for Rx rings only; stats_ctx is not saved for Tx
+ * rings in grp_info.
+ */
+ bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+ if (rc)
+ return rc;
+ }
+ }
+ return 0;
+}
+
+int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
+{
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq;
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+
+ if (i >= bp->rx_cp_nr_rings) {
+ txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
+ cpr = txq->cp_ring;
+ } else {
+ rxq = bp->rx_queues[i];
+ cpr = rxq->cp_ring;
+ }
+
+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
+
+ if (rc)
+ return rc;
+ }
+ return rc;
+}
+
+int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
+{
+ uint16_t idx;
+ int rc = 0;
+
+ for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
+
+ if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to free invalid ring group %d\n",
+ idx);
+ continue;
+ }
+
+ rc = bnxt_hwrm_ring_grp_free(bp, idx);
+
+ if (rc)
+ return rc;
+ }
+ return rc;
+}
+
+static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ unsigned int idx __rte_unused)
+{
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+
+ bnxt_hwrm_ring_free(bp, cp_ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
+ cp_ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
+ memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
+ sizeof(*cpr->cp_desc_ring));
+ cpr->cp_raw_cons = 0;
+}
+
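+/* Free every Tx, Rx and completion ring registered with firmware, and
+ * reset the host-side descriptor rings to their empty state.
+ */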
+int bnxt_free_all_hwrm_rings(struct bnxt *bp)
+{
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ unsigned int idx = bp->rx_cp_nr_rings + i + 1;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_TX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ memset(txr->tx_desc_ring, 0,
+ txr->tx_ring_struct->ring_size *
+ sizeof(*txr->tx_desc_ring));
+ memset(txr->tx_buf_ring, 0,
+ txr->tx_ring_struct->ring_size *
+ sizeof(*txr->tx_buf_ring));
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+ }
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_free_cp_ring(bp, cpr, idx);
+ cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->rx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ unsigned int idx = i + 1;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+ memset(rxr->rx_desc_ring, 0,
+ rxr->rx_ring_struct->ring_size *
+ sizeof(*rxr->rx_desc_ring));
+ memset(rxr->rx_buf_ring, 0,
+ rxr->rx_ring_struct->ring_size *
+ sizeof(*rxr->rx_buf_ring));
+ rxr->rx_prod = 0;
+ memset(rxr->ag_buf_ring, 0,
+ rxr->ag_ring_struct->ring_size *
+ sizeof(*rxr->ag_buf_ring));
+ rxr->ag_prod = 0;
+ }
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_free_cp_ring(bp, cpr, idx);
+ bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+ cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+
+ /* Default completion ring */
+ {
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_free_cp_ring(bp, cpr, 0);
+ cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+
+ return rc;
+}
+
+int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
+{
+ uint16_t i;
+ int rc = 0;
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ rc = bnxt_hwrm_ring_grp_alloc(bp, i);
+ if (rc)
+ return rc;
+ }
+ return rc;
+}
+
+void bnxt_free_hwrm_resources(struct bnxt *bp)
+{
+ /* Free the rte_malloc'd response buffer */
+ rte_free(bp->hwrm_cmd_resp_addr);
+ bp->hwrm_cmd_resp_addr = NULL;
+ bp->hwrm_cmd_resp_dma_addr = 0;
+}
+
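+/* Allocate the buffer used for HWRM responses and lock its page so a
+ * valid physical address can be handed to firmware.
+ */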
+int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+{
+ struct rte_pci_device *pdev = bp->pdev;
+ char type[RTE_MEMZONE_NAMESIZE];
+
+ snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
+ pdev->addr.domain, pdev->addr.bus, pdev->addr.devid,
+ pdev->addr.function);
+ bp->max_req_len = HWRM_MAX_REQ_LEN;
+ bp->max_resp_len = HWRM_MAX_RESP_LEN;
+ bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
+ if (bp->hwrm_cmd_resp_addr == NULL)
+ return -ENOMEM;
+ rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
+ return -ENOMEM;
+ bp->hwrm_cmd_resp_dma_addr =
+ rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ if (bp->hwrm_cmd_resp_dma_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map response address to physical memory\n");
+ return -ENOMEM;
+ }
+ rte_spinlock_init(&bp->hwrm_lock);
+
+ return 0;
+}
+
+int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ struct bnxt_filter_info *filter;
+ int rc = 0;
+
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ rc = bnxt_hwrm_clear_filter(bp, filter);
+ if (rc)
+ break;
+ }
+ return rc;
+}
+
+int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ struct bnxt_filter_info *filter;
+ int rc = 0;
+
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
+ if (rc)
+ break;
+ }
+ return rc;
+}
+
+void bnxt_free_tunnel_ports(struct bnxt *bp)
+{
+ if (bp->vxlan_port_cnt)
+ bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
+ bp->vxlan_port = 0;
+ if (bp->geneve_port_cnt)
+ bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
+ bp->geneve_port = 0;
+}
+
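+/* Release all firmware resources held by the port: filters, contexts,
+ * VNICs, rings, ring groups, stat contexts and tunnel ports.
+ */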
+void bnxt_free_all_hwrm_resources(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+ unsigned int i;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+ if (BNXT_PF(bp))
+ bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
+
+ /* VNIC resources */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ bnxt_clear_hwrm_vnic_filters(bp, vnic);
+
+ bnxt_hwrm_vnic_ctx_free(bp, vnic);
+
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
+
+ bnxt_hwrm_vnic_free(bp, vnic);
+ }
+ /* Ring resources */
+ bnxt_free_all_hwrm_rings(bp);
+ bnxt_free_all_hwrm_ring_grps(bp);
+ bnxt_free_all_hwrm_stat_ctxs(bp);
+ bnxt_free_tunnel_ports(bp);
+}
+
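+/* The helpers below translate between rte_ethdev link speed/duplex values
+ * and the HWRM PHY encodings.
+ */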
+static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
+{
+ uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
+
+ if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+ return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
+
+ switch (conf_link_speed) {
+ case ETH_LINK_SPEED_10M_HD:
+ case ETH_LINK_SPEED_100M_HD:
+ return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
+ }
+ return hw_link_duplex;
+}
+
+static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
+{
+ uint16_t eth_link_speed = 0;
+
+ if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
+ return ETH_LINK_SPEED_AUTONEG;
+
+ switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
+ case ETH_LINK_SPEED_100M:
+ case ETH_LINK_SPEED_100M_HD:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
+ break;
+ case ETH_LINK_SPEED_1G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
+ break;
+ case ETH_LINK_SPEED_2_5G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
+ break;
+ case ETH_LINK_SPEED_10G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
+ break;
+ case ETH_LINK_SPEED_20G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
+ break;
+ case ETH_LINK_SPEED_25G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
+ break;
+ case ETH_LINK_SPEED_40G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
+ break;
+ case ETH_LINK_SPEED_50G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
+ break;
+ default:
+ RTE_LOG(ERR, PMD,
+ "Unsupported link speed %u; defaulting to autoneg\n",
+ conf_link_speed);
+ break;
+ }
+ return eth_link_speed;
+}
+
+#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
+ ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
+ ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
+
+static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
+{
+ uint32_t one_speed;
+
+ if (link_speed == ETH_LINK_SPEED_AUTONEG)
+ return 0;
+
+ if (link_speed & ETH_LINK_SPEED_FIXED) {
+ one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
+
+ if (one_speed & (one_speed - 1)) {
+ RTE_LOG(ERR, PMD,
+ "Invalid advertised speeds (%u) for port %u\n",
+ link_speed, port_id);
+ return -EINVAL;
+ }
+ if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
+ RTE_LOG(ERR, PMD,
+ "Unsupported advertised speed (%u) for port %u\n",
+ link_speed, port_id);
+ return -EINVAL;
+ }
+ } else {
+ if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
+ RTE_LOG(ERR, PMD,
+ "Unsupported advertised speeds (%u) for port %u\n",
+ link_speed, port_id);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
+{
+ uint16_t ret = 0;
+
+ if (link_speed == ETH_LINK_SPEED_AUTONEG)
+ link_speed = BNXT_SUPPORTED_SPEEDS;
+
+ if (link_speed & ETH_LINK_SPEED_100M)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
+ if (link_speed & ETH_LINK_SPEED_100M_HD)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
+ if (link_speed & ETH_LINK_SPEED_1G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
+ if (link_speed & ETH_LINK_SPEED_2_5G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
+ if (link_speed & ETH_LINK_SPEED_10G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
+ if (link_speed & ETH_LINK_SPEED_20G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
+ if (link_speed & ETH_LINK_SPEED_25G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
+ if (link_speed & ETH_LINK_SPEED_40G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
+ if (link_speed & ETH_LINK_SPEED_50G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
+ return ret;
+}
+
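+/* Map an HWRM-reported link speed back to an ETH_SPEED_NUM_* value. */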
+static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
+{
+ uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
+
+ switch (hw_link_speed) {
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
+ eth_link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
+ eth_link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
+ eth_link_speed = ETH_SPEED_NUM_2_5G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
+ eth_link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
+ eth_link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
+ eth_link_speed = ETH_SPEED_NUM_25G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
+ eth_link_speed = ETH_SPEED_NUM_40G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
+ eth_link_speed = ETH_SPEED_NUM_50G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
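+ /* FALLTHROUGH: 2Gb/s has no ethdev speed define */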
+ default:
+ RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
+ hw_link_speed);
+ break;
+ }
+ return eth_link_speed;
+}
+
+static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
+{
+ uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ switch (hw_link_duplex) {
+ case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
+ case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
+ eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
+ eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
+ hw_link_duplex);
+ break;
+ }
+ return eth_link_duplex;
+}
+
+int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
+{
+ int rc = 0;
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Get link config failed with rc %d\n", rc);
+ goto exit;
+ }
+ if (link_info->link_up)
+ link->link_speed =
+ bnxt_parse_hw_link_speed(link_info->link_speed);
+ else
+ link->link_speed = ETH_SPEED_NUM_NONE;
+ link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
+ link->link_status = link_info->link_up;
+ link->link_autoneg = link_info->auto_mode ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
+ ETH_LINK_FIXED : ETH_LINK_AUTONEG;
+exit:
+ return rc;
+}
+
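+/* Push the configured link settings to the PHY; NPAR and VF functions do
+ * not own the link, so they skip this.
+ */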
+int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
+{
+ int rc = 0;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_link_info link_req;
+ uint16_t speed;
+
+ if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
+ return 0;
+
+ rc = bnxt_valid_link_speed(dev_conf->link_speeds,