rte_spinlock_unlock(&bp->hwrm_lock); \
if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
rc = -EACCES; \
- else if (rc > 0) \
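+ /* Map firmware error codes to errno values; unknown positive codes become -EIO. */ \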
+ else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+ rc = -ENOSPC; \
+ else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
rc = -EINVAL; \
+ else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+ rc = -ENOTSUP; \
+ else if (rc > 0) \
+ rc = -EIO; \
return rc; \
} \
if (resp->error_code) { \
rte_spinlock_unlock(&bp->hwrm_lock); \
if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
rc = -EACCES; \
- else if (rc > 0) \
+ else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+ rc = -ENOSPC; \
+ else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
rc = -EINVAL; \
+ else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+ rc = -ENOTSUP; \
+ else if (rc > 0) \
+ rc = -EIO; \
return rc; \
} \
} while (0)
struct bnxt_filter_info *filter)
{
int rc = 0;
+ struct bnxt_filter_info *l2_filter = filter;
struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
if (filter->fw_l2_filter_id == UINT64_MAX)
return 0;
+ if (filter->matching_l2_fltr_ptr)
+ l2_filter = filter->matching_l2_fltr_ptr;
+
+ PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
+ filter, l2_filter, l2_filter->l2_ref_cnt);
+
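+ /* Drop one reference; free the filter in firmware only when no references remain. */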
+ if (l2_filter->l2_ref_cnt > 0)
+ l2_filter->l2_ref_cnt--;
+
+ if (l2_filter->l2_ref_cnt > 0)
+ return 0;
+
HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(filter->flags);
- req.flags |=
- rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
enables = filter->enables |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
req.src_id = rte_cpu_to_le_32(filter->src_id);
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
req.src_type = filter->src_type;
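+ /* Optionally place the new filter relative to an existing one via the priority hint. */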
+ if (filter->pri_hint) {
+ req.pri_hint = filter->pri_hint;
+ req.l2_filter_id_hint =
+ rte_cpu_to_le_64(filter->l2_filter_id_hint);
+ }
req.enables = rte_cpu_to_le_32(enables);
bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
- bp->max_l2_ctx =
- rte_le_to_cpu_16(resp->max_l2_ctxs) + bp->max_rx_em_flows;
+ bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
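+ /* EM flow entries count toward L2 contexts only on non-Thor chips. */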
+ if (!BNXT_CHIP_THOR(bp))
+ bp->max_l2_ctx += bp->max_rx_em_flows;
/* TODO: For now, do not support VMDq/RFS on VFs. */
if (BNXT_PF(bp)) {
if (bp->pf.max_vfs)
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
+ ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
req.async_event_fwd[0] |=
/* func_resource_qcaps does not return max_rx_em_flows.
* So use the value provided by func_qcaps.
*/
- bp->max_l2_ctx =
- rte_le_to_cpu_16(resp->max_l2_ctxs) +
- bp->max_rx_em_flows;
+ bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ if (!BNXT_CHIP_THOR(bp))
+ bp->max_l2_ctx += bp->max_rx_em_flows;
bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
}
if (dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
+ if (dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
+ bp->flags |= BNXT_FLAG_ADV_FLOW_MGMT;
+ PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
+ }
error:
HWRM_UNLOCK();
return rc;
}
-int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
- struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
+static int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
{
int rc = 0;
struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
return rc;
}
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+
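+ /* Thor tracks multiple load-balancing contexts per VNIC; other chips use a single RSS rule. */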
+ if (BNXT_CHIP_THOR(bp)) {
+ int j;
+
+ for (j = 0; j < vnic->num_lb_ctxts; j++) {
+ rc = _bnxt_hwrm_vnic_ctx_free(bp,
+ vnic,
+ vnic->fw_grp_ids[j]);
+ vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+ }
+ vnic->num_lb_ctxts = 0;
+ } else {
+ rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
+ vnic->rss_rule = INVALID_HW_RING_ID;
+ }
+
+ return rc;
+}
+
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc = 0;
else
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
+ bnxt_free_filter(bp, filter);
//if (rc)
//break;
}
struct rte_flow *flow;
int rc = 0;
- STAILQ_FOREACH(flow, &vnic->flow_list, next) {
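+ /* Walk by taking the head each time: flows are unlinked from the list as they are cleared, which STAILQ_FOREACH does not tolerate. */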
+ while (!STAILQ_EMPTY(&vnic->flow_list)) {
+ flow = STAILQ_FIRST(&vnic->flow_list);
filter = flow->filter;
PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
if (filter->filter_type == HWRM_CFA_EM_FILTER)
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
- int i, j;
+ int i;
if (bp->vnic_info == NULL)
return;
* Clean up VNICs in reverse order, to make sure the L2 filter
* from vnic0 is last to be cleaned up.
*/
- for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ for (i = bp->max_vnics - 1; i >= 0; i--) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
- if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
- return;
- }
+ /* If the VNIC ID is invalid we are not currently using the VNIC */
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ continue;
bnxt_clear_hwrm_vnic_flows(bp, vnic);
bnxt_clear_hwrm_vnic_filters(bp, vnic);
- if (BNXT_CHIP_THOR(bp)) {
- for (j = 0; j < vnic->num_lb_ctxts; j++) {
- bnxt_hwrm_vnic_ctx_free(bp, vnic,
- vnic->fw_grp_ids[j]);
- vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
- }
- vnic->num_lb_ctxts = 0;
- } else {
- bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
- vnic->rss_rule = INVALID_HW_RING_ID;
- }
+ bnxt_hwrm_vnic_ctx_free(bp, vnic);
bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
goto port_phy_cfg;
autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
+ if (BNXT_CHIP_THOR(bp) &&
+ dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+ /* 40G is not supported as part of media auto detect.
+ * The speed should be forced and autoneg disabled
+ * to configure 40G speed.
+ */
+ PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
+ autoneg = 0;
+ }
+
speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
- /* Autoneg can be done only when the FW allows */
- if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
- bp->link_info.force_link_speed)) {
+ /* Autoneg can be done only when the FW allows it.
+ * When the user configures a fixed speed of 40G and later changes to
+ * any other speed, auto_link_speed/force_link_speed remains set
+ * to 40G until the link comes up at the new speed.
+ */
+ if (autoneg == 1 &&
+ !(!BNXT_CHIP_THOR(bp) &&
+ (bp->link_info.auto_link_speed ||
+ bp->link_info.force_link_speed))) {
link_req.phy_flags |=
HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
link_req.auto_link_speed_mask =
if (BNXT_CHIP_THOR(bp))
return bnxt_vnic_rss_configure_thor(bp, vnic);
- /*
- * Fill the RSS hash & redirection table with
- * ring group ids for all VNICs
- */
- for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
- rss_idx++, fw_idx++) {
- for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- fw_idx %= bp->rx_cp_nr_rings;
- if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
- break;
- fw_idx++;
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ return 0;
+
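+ /* Program RSS only if a redirection table and hash type were configured. */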
+ if (vnic->rss_table && vnic->hash_type) {
+ /*
+ * Fill the RSS hash & redirection table with
+ * ring group ids for all VNICs
+ */
+ for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+ rss_idx++, fw_idx++) {
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ fw_idx %= bp->rx_cp_nr_rings;
+ if (vnic->fw_grp_ids[fw_idx] !=
+ INVALID_HW_RING_ID)
+ break;
+ fw_idx++;
+ }
+ if (i == bp->rx_cp_nr_rings)
+ return 0;
+ vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
}
- if (i == bp->rx_cp_nr_rings)
- return 0;
- vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
+ return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
}
- return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+
+ return 0;
}
static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
struct hwrm_func_backing_store_qcaps_input req = {0};
struct hwrm_func_backing_store_qcaps_output *resp =
bp->hwrm_cmd_resp_addr;
- int rc;
+ struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_info *ctx;
+ int total_alloc_len;
+ int rc, i;
if (!BNXT_CHIP_THOR(bp) ||
bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT_SILENT();
- if (!rc) {
- struct bnxt_ctx_pg_info *ctx_pg;
- struct bnxt_ctx_mem_info *ctx;
- int total_alloc_len;
- int i;
-
- total_alloc_len = sizeof(*ctx);
- ctx = rte_malloc("bnxt_ctx_mem", total_alloc_len,
- RTE_CACHE_LINE_SIZE);
- if (!ctx) {
- rc = -ENOMEM;
- goto ctx_err;
- }
- memset(ctx, 0, total_alloc_len);
+ total_alloc_len = sizeof(*ctx);
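+ /* rte_zmalloc returns zeroed memory, so no separate memset is needed. */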
+ ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
+ RTE_CACHE_LINE_SIZE);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
- ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
- sizeof(*ctx_pg) * BNXT_MAX_Q,
- RTE_CACHE_LINE_SIZE);
- if (!ctx_pg) {
- rc = -ENOMEM;
- goto ctx_err;
- }
- for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
-
- bp->ctx = ctx;
- ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
- ctx->qp_min_qp1_entries =
- rte_le_to_cpu_16(resp->qp_min_qp1_entries);
- ctx->qp_max_l2_entries =
- rte_le_to_cpu_16(resp->qp_max_l2_entries);
- ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
- ctx->srq_max_l2_entries =
- rte_le_to_cpu_16(resp->srq_max_l2_entries);
- ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
- ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
- ctx->cq_max_l2_entries =
- rte_le_to_cpu_16(resp->cq_max_l2_entries);
- ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
- ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
- ctx->vnic_max_vnic_entries =
- rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
- ctx->vnic_max_ring_table_entries =
- rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
- ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
- ctx->stat_max_entries =
- rte_le_to_cpu_32(resp->stat_max_entries);
- ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
- ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
- ctx->tqm_min_entries_per_ring =
- rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
- ctx->tqm_max_entries_per_ring =
- rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
- ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
- if (!ctx->tqm_entries_multiple)
- ctx->tqm_entries_multiple = 1;
- ctx->mrav_max_entries =
- rte_le_to_cpu_32(resp->mrav_max_entries);
- ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
- ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
- ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
- } else {
- rc = 0;
+ ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
+ sizeof(*ctx_pg) * BNXT_MAX_Q,
+ RTE_CACHE_LINE_SIZE);
+ if (!ctx_pg) {
+ rc = -ENOMEM;
+ goto ctx_err;
}
+ for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+
+ bp->ctx = ctx;
+ ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
+ ctx->qp_min_qp1_entries =
+ rte_le_to_cpu_16(resp->qp_min_qp1_entries);
+ ctx->qp_max_l2_entries =
+ rte_le_to_cpu_16(resp->qp_max_l2_entries);
+ ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
+ ctx->srq_max_l2_entries =
+ rte_le_to_cpu_16(resp->srq_max_l2_entries);
+ ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
+ ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
+ ctx->cq_max_l2_entries =
+ rte_le_to_cpu_16(resp->cq_max_l2_entries);
+ ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
+ ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
+ ctx->vnic_max_vnic_entries =
+ rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
+ ctx->vnic_max_ring_table_entries =
+ rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
+ ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
+ ctx->stat_max_entries =
+ rte_le_to_cpu_32(resp->stat_max_entries);
+ ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
+ ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
+ ctx->tqm_min_entries_per_ring =
+ rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
+ ctx->tqm_max_entries_per_ring =
+ rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
+ ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
+ if (!ctx->tqm_entries_multiple)
+ ctx->tqm_entries_multiple = 1;
+ ctx->mrav_max_entries =
+ rte_le_to_cpu_32(resp->mrav_max_entries);
+ ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
+ ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
+ ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
ctx_err:
HWRM_UNLOCK();
return rc;
return rc;
}
+
+int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
+{
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
+ uint32_t flags = 0;
+ int rc = 0;
+
+ if (!(bp->flags & BNXT_FLAG_ADV_FLOW_MGMT))
+ return rc;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
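+ /* Query the firmware's advanced flow management capabilities. */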
+ HWRM_PREP(req, CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+ HWRM_CHECK_RESULT();
+ flags = rte_le_to_cpu_32(resp->flags);
+ HWRM_UNLOCK();
+
+ if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN) {
+ bp->flow_flags |= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN;
+ PMD_DRV_LOG(INFO, "Source L2 header filtering enabled\n");
+ }
+
+ return rc;
+}