diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 045ce4a9c2..5cb394994e 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -27,6 +27,7 @@
 #include <rte_io.h>
 
 #define HWRM_CMD_TIMEOUT		6000000
+#define HWRM_SHORT_CMD_TIMEOUT		50000
 #define HWRM_SPEC_CODE_1_8_3		0x10803
 #define HWRM_VERSION_1_9_1		0x10901
 #define HWRM_VERSION_1_9_2		0x10903
@@ -97,6 +98,17 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 			GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
 	uint16_t mb_trigger_offset = use_kong_mb ?
 		GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
+	uint32_t timeout;
+
+	/* Do not send HWRM commands to firmware in error state */
+	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
+		return 0;
+
+	/* For the VER_GET command, set the timeout to 50ms */
+	if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
+		timeout = HWRM_SHORT_CMD_TIMEOUT;
+	else
+		timeout = HWRM_CMD_TIMEOUT;
 
 	if (bp->flags & BNXT_FLAG_SHORT_CMD ||
 	    msg_len > bp->max_req_len) {
@@ -115,9 +127,6 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 		data = (uint32_t *)&short_input;
 		msg_len = sizeof(short_input);
 
-		/* Sync memory write before updating doorbell */
-		rte_wmb();
-
 		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
 	}
 
@@ -137,11 +146,17 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 	/* Ring channel doorbell */
 	bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
 	rte_write32(1, bar);
+	/*
+	 * Make sure the channel doorbell ring command completes before
+	 * reading the response to avoid getting stale or invalid
+	 * responses.
+	 */
+	rte_io_mb();
 
 	/* Poll for the valid bit */
-	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
+	for (i = 0; i < timeout; i++) {
 		/* Sanity check on the resp->resp_len */
-		rte_rmb();
+		rte_cio_rmb();
 		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
 			/* Last byte of resp contains the valid key */
 			valid = (uint8_t *)resp + resp->resp_len - 1;
@@ -151,7 +166,12 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 		rte_delay_us(1);
 	}
 
-	if (i >= HWRM_CMD_TIMEOUT) {
+	if (i >= timeout) {
+		/* Suppress VER_GET timeout messages during reset recovery */
+		if (bp->flags & BNXT_FLAG_FW_RESET &&
+		    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
+			return -ETIMEDOUT;
+
 		PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
 			    req->req_type);
 		return -ETIMEDOUT;
@@ -199,8 +219,14 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 		rte_spinlock_unlock(&bp->hwrm_lock); \
 		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
 			rc = -EACCES; \
-		else if (rc > 0) \
+		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+			rc = -ENOSPC; \
+		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
 			rc = -EINVAL; \
+		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+			rc = -ENOTSUP; \
+		else if (rc > 0) \
+			rc = -EIO; \
 		return rc; \
 	} \
 	if (resp->error_code) { \
@@ -221,8 +247,14 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 		rte_spinlock_unlock(&bp->hwrm_lock); \
 		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
 			rc = -EACCES; \
-		else if (rc > 0) \
+		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+			rc = -ENOSPC; \
+		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
 			rc = -EINVAL; \
+		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+			rc = -ENOTSUP; \
+		else if (rc > 0) \
+			rc = -EIO; \
 		return rc; \
 	} \
 } while (0)
@@ -341,12 +373,25 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
 {
 	int rc = 0;
+	struct bnxt_filter_info *l2_filter = filter;
 	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
 	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
 
 	if (filter->fw_l2_filter_id == UINT64_MAX)
 		return 0;
 
+	if (filter->matching_l2_fltr_ptr)
+		l2_filter = filter->matching_l2_fltr_ptr;
+
+	PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
+		    filter, l2_filter, l2_filter->l2_ref_cnt);
+
+	if (l2_filter->l2_ref_cnt > 0)
+		l2_filter->l2_ref_cnt--;
+
+	if (l2_filter->l2_ref_cnt > 0)
+		return 0;
+
 	HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
 
 	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
@@ -393,8 +438,6 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
 
 	req.flags = rte_cpu_to_le_32(filter->flags);
-	req.flags |=
-	rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
 
 	enables = filter->enables |
 	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
@@ -424,6 +467,11 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 		req.src_id = rte_cpu_to_le_32(filter->src_id);
 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
 		req.src_type = filter->src_type;
+	if (filter->pri_hint) {
+		req.pri_hint = filter->pri_hint;
+		req.l2_filter_id_hint =
+			rte_cpu_to_le_64(filter->l2_filter_id_hint);
+	}
 
 	req.enables = rte_cpu_to_le_32(enables);
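
The two HWRM_CHECK_RESULT hunks above widen the translation of positive firmware status codes into negative errnos. Written out as a plain function, the mapping reads as below. This is an illustrative sketch only, not code from the patch, and example_hwrm_status_to_errno() is a hypothetical name; the HWRM_ERR_CODE_* constants are the ones the diff itself uses, from hsi_struct_def_dpdk.h.

/* Sketch only, not part of the patch: the errno mapping that the
 * HWRM_CHECK_RESULT() hunks above open-code inside the macro.
 * Assumes the HWRM_ERR_CODE_* values from hsi_struct_def_dpdk.h.
 */
#include <errno.h>
#include "hsi_struct_def_dpdk.h"

static int example_hwrm_status_to_errno(int rc)
{
	if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED)
		return -EACCES;
	if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
		return -ENOSPC;
	if (rc == HWRM_ERR_CODE_INVALID_PARAMS)
		return -EINVAL;
	if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED)
		return -ENOTSUP;
	if (rc > 0)
		return -EIO;	/* any other positive firmware status */
	return rc;		/* success (0) or an existing -errno */
}

Routing unrecognized positive statuses to -EIO, instead of the previous blanket -EINVAL, lets callers tell device-side failures apart from genuinely bad parameters.
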
@@ -489,31 +537,37 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 
 	HWRM_CHECK_RESULT();
 
-	if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
+	if (!BNXT_CHIP_THOR(bp) &&
+	    !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
 		return 0;
 
+	if (resp->flags &
+	    HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
+		bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
+
 	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
 	if (!ptp)
 		return -ENOMEM;
 
-	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
-	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
-	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
-	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
-	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
-	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
-		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
-	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
-		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
-	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
-		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
-	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
-		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+	if (!BNXT_CHIP_THOR(bp)) {
+		ptp->rx_regs[BNXT_PTP_RX_TS_L] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
+		ptp->rx_regs[BNXT_PTP_RX_TS_H] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
+		ptp->rx_regs[BNXT_PTP_RX_SEQ] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
+		ptp->rx_regs[BNXT_PTP_RX_FIFO] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
+		ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
+		ptp->tx_regs[BNXT_PTP_TX_TS_L] =
+			rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
+		ptp->tx_regs[BNXT_PTP_TX_TS_H] =
+			rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
+		ptp->tx_regs[BNXT_PTP_TX_SEQ] =
+			rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
+		ptp->tx_regs[BNXT_PTP_TX_FIFO] =
+			rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+	}
 
 	ptp->bp = bp;
 	bp->ptp_cfg = ptp;
@@ -588,8 +642,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
 	bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
 	bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
-	bp->max_l2_ctx =
-		rte_le_to_cpu_16(resp->max_l2_ctxs) + bp->max_rx_em_flows;
+	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+	if (!BNXT_CHIP_THOR(bp))
+		bp->max_l2_ctx += bp->max_rx_em_flows;
 	/* TODO: For now, do not support VMDq/RFS on VFs. */
 	if (BNXT_PF(bp)) {
 		if (bp->pf.max_vfs)
@@ -613,6 +668,18 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
 		bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
 
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
+		bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+		PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
+	} else {
+		bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+	}
+
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
+		bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+	else
+		bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+
 	HWRM_UNLOCK();
 
 	return rc;
@@ -657,12 +724,23 @@ int bnxt_hwrm_func_reset(struct bnxt *bp)
 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 {
 	int rc;
+	uint32_t flags = 0;
 	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
 	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
 
 	if (bp->flags & BNXT_FLAG_REGISTERED)
 		return 0;
 
+	flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
+	if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
+
+	/* PFs and trusted VFs should indicate support for the Master
+	 * capability on non-Stingray platforms.
+	 */
+	if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
+		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
+
 	HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
 			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
@@ -683,14 +761,20 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 		 * this HWRM sniffer list in FW because DPDK PF driver does
 		 * not support this.
 		 */
-		req.flags =
-		rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
+		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
 	}
 
+	req.flags = rte_cpu_to_le_32(flags);
+
 	req.async_event_fwd[0] |=
 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
 				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
-				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
+				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
+				 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
+	if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+		req.async_event_fwd[0] |=
+			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
 	req.async_event_fwd[1] |=
 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
 				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
@@ -698,6 +782,11 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
+
+	flags = rte_le_to_cpu_32(resp->flags);
+	if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
+		bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;
+
 	HWRM_UNLOCK();
 
 	bp->flags |= BNXT_FLAG_REGISTERED;
@@ -737,9 +826,12 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
 	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
 	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
 					    AGG_RING_MULTIPLIER);
-	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
+	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
+					     bp->tx_nr_rings +
+					     BNXT_NUM_ASYNC_CPR(bp));
 	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
-					      bp->tx_nr_rings);
+					      bp->tx_nr_rings +
+					      BNXT_NUM_ASYNC_CPR(bp));
 	req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
 	if (bp->vf_resv_strategy ==
 	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
@@ -798,9 +890,9 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 		/* func_resource_qcaps does not return max_rx_em_flows.
 		 * So use the value provided by func_qcaps.
 		 */
-		bp->max_l2_ctx =
-			rte_le_to_cpu_16(resp->max_l2_ctxs) +
-			bp->max_rx_em_flows;
+		bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+		if (!BNXT_CHIP_THOR(bp))
+			bp->max_l2_ctx += bp->max_rx_em_flows;
 		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
 		bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
 	}
@@ -834,7 +926,10 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
-	HWRM_CHECK_RESULT();
+	if (bp->flags & BNXT_FLAG_FW_RESET)
+		HWRM_CHECK_RESULT_SILENT();
+	else
+		HWRM_CHECK_RESULT();
 
 	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
 		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
@@ -938,6 +1033,11 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	if (dev_caps_cfg &
 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
 		PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
+	if (dev_caps_cfg &
+	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
+		bp->flags |= BNXT_FLAG_ADV_FLOW_MGMT;
+		PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
+	}
 
 error:
 	HWRM_UNLOCK();
@@ -961,8 +1061,6 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
 
-	bp->flags &= ~BNXT_FLAG_REGISTERED;
-
 	return rc;
 }
@@ -1173,6 +1271,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
 		mb_pool = bp->rx_queues[0]->mb_pool;
 		rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
 			      RTE_PKTMBUF_HEADROOM;
+		rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
 		req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
 
 		enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
@@ -1203,6 +1302,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
 			mb_pool = bp->rx_queues[0]->mb_pool;
 			rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
 				      RTE_PKTMBUF_HEADROOM;
+			rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
 			req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
 
 			req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
@@ -1819,6 +1919,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
 	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
 	size -= RTE_PKTMBUF_HEADROOM;
+	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
 
 	req.jumbo_thresh = rte_cpu_to_le_16(size);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
@@ -2073,7 +2174,7 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 	return rc;
 }
 
-static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
@@ -2083,9 +2184,10 @@ static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 	memset(cpr->cp_desc_ring, 0,
 	       cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring));
 	cpr->cp_raw_cons = 0;
+	cpr->valid = 0;
 }
 
-static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
@@ -2246,6 +2348,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 		else
 			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
+		bnxt_free_filter(bp, filter);
 		//if (rc)
 			//break;
 	}
@@ -2259,9 +2362,10 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct rte_flow *flow;
 	int rc = 0;
 
-	STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+	while (!STAILQ_EMPTY(&vnic->flow_list)) {
+		flow = STAILQ_FIRST(&vnic->flow_list);
 		filter = flow->filter;
-		PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
+		PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
 		if (filter->filter_type == HWRM_CFA_EM_FILTER)
 			rc = bnxt_hwrm_clear_em_filter(bp, filter);
 		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
@@ -2321,13 +2425,12 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 	 * Cleanup VNICs in reverse order, to make sure the L2 filter
 	 * from vnic0 is last to be cleaned up.
 	 */
-	for (i = bp->nr_vnics - 1; i >= 0; i--) {
+	for (i = bp->max_vnics - 1; i >= 0; i--) {
 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
 
-		if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
-			PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
-			return;
-		}
+		/* If the VNIC ID is invalid we are not currently using it */
+		if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+			continue;
 
 		bnxt_clear_hwrm_vnic_flows(bp, vnic);
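
The bnxt_free_all_hwrm_resources() hunk just above replaces an early return on the first unused VNIC with a continue, and walks all max_vnics slots instead of nr_vnics. Below is a minimal self-contained sketch of that iteration pattern, under hypothetical example_* names rather than the driver's own types.

/* Sketch only, not part of the patch: skip unused VNIC slots instead of
 * aborting the whole cleanup when one is found.
 */
#include <stdint.h>

#define EXAMPLE_INVALID_HW_RING_ID	((uint16_t)-1)

struct example_vnic {
	uint16_t fw_vnic_id;
};

static void example_free_all_vnics(struct example_vnic *vnics, int max_vnics,
				   void (*cleanup)(struct example_vnic *))
{
	int i;

	/* Reverse order, so slot 0 (owner of the base L2 filter) goes last */
	for (i = max_vnics - 1; i >= 0; i--) {
		if (vnics[i].fw_vnic_id == EXAMPLE_INVALID_HW_RING_ID)
			continue;	/* slot never configured; skip it */
		cleanup(&vnics[i]);
	}
}

Skipping rather than returning matters once VNICs can be allocated sparsely: an unused slot in the middle of the array no longer strands every resource behind it.
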
@@ -2614,11 +2717,27 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 		goto port_phy_cfg;
 
 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
+	if (BNXT_CHIP_THOR(bp) &&
+	    dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+		/* 40G is not supported as part of media auto-detection.
+		 * The speed must be forced and autoneg disabled
+		 * to configure 40G.
+		 */
+		PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
+		autoneg = 0;
+	}
+
 	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
 	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
-	/* Autoneg can be done only when the FW allows */
-	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
-			      bp->link_info.force_link_speed)) {
+	/* Autoneg can be done only when the FW allows.
+	 * When a user configures a fixed speed of 40G and later changes to
+	 * any other speed, auto_link_speed/force_link_speed is still set
+	 * to 40G until the link comes up at the new speed.
+	 */
+	if (autoneg == 1 &&
+	    !(!BNXT_CHIP_THOR(bp) &&
+	      (bp->link_info.auto_link_speed ||
+	       bp->link_info.force_link_speed))) {
 		link_req.phy_flags |=
 			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
 		link_req.auto_link_speed_mask =
@@ -2683,6 +2802,10 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
 	if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
 		bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
 		PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
+	} else if (BNXT_VF(bp) &&
+		   !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
+		bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
+		PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
 	}
 
 	if (mtu)
@@ -3212,7 +3335,7 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
 	req.enables = rte_cpu_to_le_32(
 			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
 	req.async_event_cr = rte_cpu_to_le_16(
-			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+			bp->async_cp_ring->cp_ring_struct->fw_ring_id);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
@@ -3232,7 +3355,7 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
 	req.enables = rte_cpu_to_le_32(
 			HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
 	req.async_event_cr = rte_cpu_to_le_16(
-			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+			bp->async_cp_ring->cp_ring_struct->fw_ring_id);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
@@ -4184,23 +4307,31 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	if (BNXT_CHIP_THOR(bp))
 		return bnxt_vnic_rss_configure_thor(bp, vnic);
 
-	/*
-	 * Fill the RSS hash & redirection table with
-	 * ring group ids for all VNICs
-	 */
-	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
-	     rss_idx++, fw_idx++) {
-		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
-			fw_idx %= bp->rx_cp_nr_rings;
-			if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
-				break;
-			fw_idx++;
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+		return 0;
+
+	if (vnic->rss_table && vnic->hash_type) {
+		/*
+		 * Fill the RSS hash & redirection table with
+		 * ring group ids for all VNICs
+		 */
+		for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+		     rss_idx++, fw_idx++) {
+			for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+				fw_idx %= bp->rx_cp_nr_rings;
+				if (vnic->fw_grp_ids[fw_idx] !=
+				    INVALID_HW_RING_ID)
+					break;
+				fw_idx++;
+			}
+			if (i == bp->rx_cp_nr_rings)
+				return 0;
+			vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
 		}
-		if (i == bp->rx_cp_nr_rings)
-			return 0;
-		vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
+		return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
 	}
-	return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+
+	return 0;
 }
 
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
@@ -4293,7 +4424,10 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 	struct hwrm_func_backing_store_qcaps_input req = {0};
 	struct hwrm_func_backing_store_qcaps_output *resp =
 		bp->hwrm_cmd_resp_addr;
-	int rc;
+	struct bnxt_ctx_pg_info *ctx_pg;
+	struct bnxt_ctx_mem_info *ctx;
+	int total_alloc_len;
+	int rc, i;
 
 	if (!BNXT_CHIP_THOR(bp) ||
 	    bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
@@ -4305,70 +4439,60 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT_SILENT();
 
-	if (!rc) {
-		struct bnxt_ctx_pg_info *ctx_pg;
-		struct bnxt_ctx_mem_info *ctx;
-		int total_alloc_len;
-		int i;
-
-		total_alloc_len = sizeof(*ctx);
-		ctx = rte_malloc("bnxt_ctx_mem", total_alloc_len,
-				 RTE_CACHE_LINE_SIZE);
-		if (!ctx) {
-			rc = -ENOMEM;
-			goto ctx_err;
-		}
-		memset(ctx, 0, total_alloc_len);
+	total_alloc_len = sizeof(*ctx);
+	ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
+			  RTE_CACHE_LINE_SIZE);
+	if (!ctx) {
+		rc = -ENOMEM;
+		goto ctx_err;
+	}
 
-		ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
-				    sizeof(*ctx_pg) * BNXT_MAX_Q,
-				    RTE_CACHE_LINE_SIZE);
-		if (!ctx_pg) {
-			rc = -ENOMEM;
-			goto ctx_err;
-		}
-		for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
-			ctx->tqm_mem[i] = ctx_pg;
-
-		bp->ctx = ctx;
-		ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
-		ctx->qp_min_qp1_entries =
-			rte_le_to_cpu_16(resp->qp_min_qp1_entries);
-		ctx->qp_max_l2_entries =
-			rte_le_to_cpu_16(resp->qp_max_l2_entries);
-		ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
-		ctx->srq_max_l2_entries =
-			rte_le_to_cpu_16(resp->srq_max_l2_entries);
-		ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
-		ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
-		ctx->cq_max_l2_entries =
-			rte_le_to_cpu_16(resp->cq_max_l2_entries);
-		ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
-		ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
-		ctx->vnic_max_vnic_entries =
-			rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
-		ctx->vnic_max_ring_table_entries =
-			rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
-		ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
-		ctx->stat_max_entries =
-			rte_le_to_cpu_32(resp->stat_max_entries);
-		ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
-		ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
-		ctx->tqm_min_entries_per_ring =
-			rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
-		ctx->tqm_max_entries_per_ring =
-			rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
-		ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
-		if (!ctx->tqm_entries_multiple)
-			ctx->tqm_entries_multiple = 1;
-		ctx->mrav_max_entries =
-			rte_le_to_cpu_32(resp->mrav_max_entries);
-		ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
-		ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
-		ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
-	} else {
-		rc = 0;
+	ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
+			    sizeof(*ctx_pg) * BNXT_MAX_Q,
+			    RTE_CACHE_LINE_SIZE);
+	if (!ctx_pg) {
+		rc = -ENOMEM;
+		goto ctx_err;
 	}
+	for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
+		ctx->tqm_mem[i] = ctx_pg;
+
+	bp->ctx = ctx;
+	ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
+	ctx->qp_min_qp1_entries =
+		rte_le_to_cpu_16(resp->qp_min_qp1_entries);
+	ctx->qp_max_l2_entries =
+		rte_le_to_cpu_16(resp->qp_max_l2_entries);
+	ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
+	ctx->srq_max_l2_entries =
+		rte_le_to_cpu_16(resp->srq_max_l2_entries);
+	ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
+	ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
+	ctx->cq_max_l2_entries =
+		rte_le_to_cpu_16(resp->cq_max_l2_entries);
+	ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
+	ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
+	ctx->vnic_max_vnic_entries =
+		rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
+	ctx->vnic_max_ring_table_entries =
+		rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
+	ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
+	ctx->stat_max_entries =
+		rte_le_to_cpu_32(resp->stat_max_entries);
+	ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
+	ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
+	ctx->tqm_min_entries_per_ring =
+		rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
+	ctx->tqm_max_entries_per_ring =
+		rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
+	ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
+	if (!ctx->tqm_entries_multiple)
+		ctx->tqm_entries_multiple = 1;
+	ctx->mrav_max_entries =
+		rte_le_to_cpu_32(resp->mrav_max_entries);
+	ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
+	ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
+	ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
 
 ctx_err:
 	HWRM_UNLOCK();
 	return rc;
@@ -4524,10 +4648,10 @@ bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
 		bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_KONG(bp));
+	HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
 	req.tunnel_type = type;
 	req.dest_fid = bp->fw_fid;
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 
 	HWRM_UNLOCK();
@@ -4543,10 +4667,10 @@ bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
 		bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_KONG(bp));
+	HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
 	req.tunnel_type = type;
 	req.dest_fid = bp->fw_fid;
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 
 	HWRM_UNLOCK();
@@ -4561,13 +4685,13 @@ int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
 		bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_KONG(bp));
+	HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
 	req.src_fid = bp->fw_fid;
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 
 	if (type)
-		*type = resp->tunnel_mask;
+		*type = rte_le_to_cpu_32(resp->tunnel_mask);
 
 	HWRM_UNLOCK();
 
@@ -4582,14 +4706,14 @@ int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
 		bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_KONG(bp));
+	HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
 	req.src_fid = bp->fw_fid;
 	req.tunnel_type = tun_type;
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 
 	if (dst_fid)
-		*dst_fid = resp->dest_fid;
+		*dst_fid = rte_le_to_cpu_16(resp->dest_fid);
 
 	PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid);
 
@@ -4622,3 +4746,230 @@ int bnxt_hwrm_set_mac(struct bnxt *bp)
 
 	return rc;
 }
+
+int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+{
+	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_drv_if_change_input req = {0};
+	uint32_t flags;
+	int rc;
+
+	if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
+		return 0;
+
+	/* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
+	 * If FUNC_DRV_IF_CHANGE is issued with the "down" flag before
+	 * FUNC_DRV_UNRGTR, the FW resets before FUNC_DRV_UNRGTR completes.
+	 */
+	if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
+		return 0;
+
+	HWRM_PREP(req, FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
+
+	if (up)
+		req.flags =
+		rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+	HWRM_CHECK_RESULT();
+	flags = rte_le_to_cpu_32(resp->flags);
+	HWRM_UNLOCK();
+
+	if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
+		PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
+		bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
+	}
+
+	return 0;
+}
+
+int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
+{
+	struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct bnxt_error_recovery_info *info = bp->recovery_info;
+	struct hwrm_error_recovery_qcfg_input req = {0};
+	uint32_t flags = 0;
+	unsigned int i;
+	int rc;
+
+	/* Older FW does not have error recovery support */
+	if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
+		return 0;
+
+	if (!info) {
+		info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
+				   sizeof(*info), 0);
+		bp->recovery_info = info;
+		if (info == NULL)
+			return -ENOMEM;
+	} else {
+		memset(info, 0, sizeof(*info));
+	}
+
+	HWRM_PREP(req, ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+	HWRM_CHECK_RESULT();
+
+	flags = rte_le_to_cpu_32(resp->flags);
+	if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
+		info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
+	else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
+		info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
+
+	if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
+	    !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
+		rc = -EINVAL;
+		goto err;
+	}
+
+	/* FW returned values are in units of 100msec */
+	info->driver_polling_freq =
+		rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
+	info->master_func_wait_period =
+		rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
+	info->normal_func_wait_period =
+		rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
+	info->master_func_wait_period_after_reset =
+		rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) *
+		100;
+	info->max_bailout_time_after_reset =
+		rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
+	info->status_regs[BNXT_FW_STATUS_REG] =
+		rte_le_to_cpu_32(resp->fw_health_status_reg);
+	info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
+		rte_le_to_cpu_32(resp->fw_heartbeat_reg);
+	info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
+		rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
+	info->status_regs[BNXT_FW_RESET_INPROG_REG] =
+		rte_le_to_cpu_32(resp->reset_inprogress_reg);
+	info->reg_array_cnt =
+		rte_le_to_cpu_32(resp->reg_array_cnt);
+
+	if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
+		rc = -EINVAL;
+		goto err;
+	}
+
+	for (i = 0; i < info->reg_array_cnt; i++) {
+		info->reset_reg[i] =
+			rte_le_to_cpu_32(resp->reset_reg[i]);
+		info->reset_reg_val[i] =
+			rte_le_to_cpu_32(resp->reset_reg_val[i]);
+		info->delay_after_reset[i] =
+			resp->delay_after_reset[i];
+	}
+err:
+	HWRM_UNLOCK();
+
+	/* Map the FW status registers */
+	if (!rc)
+		rc = bnxt_map_fw_health_status_regs(bp);
+
+	if (rc) {
+		rte_free(bp->recovery_info);
+		bp->recovery_info = NULL;
+	}
+	return rc;
+}
+
+int bnxt_hwrm_fw_reset(struct bnxt *bp)
+{
+	struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_fw_reset_input req = {0};
+	int rc;
+
+	if (!BNXT_PF(bp))
+		return -EOPNOTSUPP;
+
+	HWRM_PREP(req, FW_RESET, BNXT_USE_KONG(bp));
+
+	req.embedded_proc_type =
+		HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
+	req.selfrst_status =
+		HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
+	req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+				    BNXT_USE_KONG(bp));
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
+int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
+{
+	struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_port_ts_query_input req = {0};
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	uint32_t flags = 0;
+	int rc;
+
+	if (!ptp)
+		return 0;
+
+	HWRM_PREP(req, PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
+
+	switch (path) {
+	case BNXT_PTP_FLAGS_PATH_TX:
+		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
+		break;
+	case BNXT_PTP_FLAGS_PATH_RX:
+		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
+		break;
+	case BNXT_PTP_FLAGS_CURRENT_TIME:
+		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
+		break;
+	}
+
+	req.flags = rte_cpu_to_le_32(flags);
+	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+	HWRM_CHECK_RESULT();
+
+	if (timestamp) {
+		*timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
+		*timestamp |=
+			(uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
+	}
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
+int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
+{
+	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
+		bp->hwrm_cmd_resp_addr;
+	struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
+	uint32_t flags = 0;
+	int rc = 0;
+
+	if (!(bp->flags & BNXT_FLAG_ADV_FLOW_MGMT))
+		return rc;
+
+	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+		PMD_DRV_LOG(DEBUG,
+			    "Not a PF or trusted VF. Command not supported\n");
+		return 0;
+	}
+
+	HWRM_PREP(req, CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+	HWRM_CHECK_RESULT();
+	flags = rte_le_to_cpu_32(resp->flags);
+	HWRM_UNLOCK();
+
+	if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN) {
+		bp->flow_flags |= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN;
+		PMD_DRV_LOG(INFO, "Source L2 header filtering enabled\n");
+	}
+
+	return rc;
+}
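
Several hunks above (HWRM_SHORT_CMD_TIMEOUT, the rte_io_mb() after the doorbell write, the rte_cio_rmb() in the poll loop) all adjust one mechanism: bnxt_hwrm_send_message() polls the DMA'd response buffer until the firmware writes a valid byte, or times out. Below is a self-contained sketch of that contract; every example_* name is a hypothetical stand-in for the driver's internals, not the driver's API.

/* Sketch only, not part of the patch: the poll-for-valid loop with a
 * per-command timeout.  The real implementation is bnxt_hwrm_send_message()
 * above, which uses rte_cio_rmb() and rte_delay_us() directly.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_SHORT_CMD_TIMEOUT	50000	/* VER_GET: 50 ms, in usec */
#define EXAMPLE_CMD_TIMEOUT		6000000	/* other commands: 6 s */
#define EXAMPLE_RESP_VALID_KEY		1	/* FW writes this byte last */

/* Leading fields shared by every HWRM response (per hsi_struct_def_dpdk.h) */
struct example_hwrm_resp_hdr {
	uint16_t error_code;
	uint16_t req_type;
	uint16_t seq_id;
	uint16_t resp_len;	/* total length FW wrote, in bytes (LE) */
};

static bool example_hwrm_poll_valid(volatile struct example_hwrm_resp_hdr *resp,
				    uint16_t max_resp_len, uint32_t timeout_us,
				    void (*io_rmb)(void),	/* rte_cio_rmb */
				    void (*delay_us)(uint32_t))	/* rte_delay_us */
{
	const volatile uint8_t *buf = (const volatile uint8_t *)resp;
	uint32_t i;

	for (i = 0; i < timeout_us; i++) {
		uint16_t len;

		io_rmb();		/* don't reorder loads before the DMA */
		len = resp->resp_len;	/* assumes a little-endian host */
		if (len && len <= max_resp_len &&
		    buf[len - 1] == EXAMPLE_RESP_VALID_KEY)
			return true;	/* response is fully written */
		delay_us(1);
	}
	return false;			/* caller maps this to -ETIMEDOUT */
}

Polling in 1 us steps bounds the worst case at timeout_us microseconds while keeping the common case fast, and the short 50 ms VER_GET timeout lets reset recovery retry quickly instead of blocking for the full 6 seconds.
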