if (bp->flags & BNXT_FLAG_FATAL_ERROR)
return 0;
- /* For VER_GET command, set timeout as 50ms */
- if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
- timeout = HWRM_CMD_TIMEOUT;
- else
- timeout = bp->hwrm_cmd_timeout;
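+ /* bp->hwrm_cmd_timeout is seeded by bnxt_hwrm_ver_get() before any
+ * other command is issued, so it now covers VER_GET as well.
+ */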
+ timeout = bp->hwrm_cmd_timeout;
if (bp->flags & BNXT_FLAG_SHORT_CMD ||
msg_len > bp->max_req_len) {
*/
#define HWRM_PREP(req, type, kong) do { \
rte_spinlock_lock(&bp->hwrm_lock); \
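+ /* The response buffer may have been freed, e.g. across a FW reset. */ \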
+ if (bp->hwrm_cmd_resp_addr == NULL) { \
+ rte_spinlock_unlock(&bp->hwrm_lock); \
+ return -EACCES; \
+ } \
memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
req.req_type = rte_cpu_to_le_16(HWRM_##type); \
req.cmpl_ring = rte_cpu_to_le_16(-1); \
if (vlan_table) {
if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
- req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
- rte_mem_virt2iova(vlan_table));
+ req.vlan_tag_tbl_addr =
+ rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
}
req.mask = rte_cpu_to_le_32(mask);
req.fid = rte_cpu_to_le_16(fid);
req.vlan_tag_mask_tbl_addr =
- rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
+ rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
}
int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
- struct bnxt_filter_info *filter)
+ struct bnxt_filter_info *filter)
{
int rc = 0;
struct bnxt_filter_info *l2_filter = filter;
+ struct bnxt_vnic_info *vnic = NULL;
struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
filter, l2_filter, l2_filter->l2_ref_cnt);
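+ /* Nothing to do if no references to the L2 filter remain. */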
+ if (l2_filter->l2_ref_cnt == 0)
+ return 0;
+
if (l2_filter->l2_ref_cnt > 0)
l2_filter->l2_ref_cnt--;
HWRM_UNLOCK();
filter->fw_l2_filter_id = UINT64_MAX;
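+ /* Last reference released: unlink the filter from its VNIC and
+ * free it.
+ */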
+ if (l2_filter->l2_ref_cnt == 0) {
+ vnic = l2_filter->vnic;
+ if (vnic) {
+ STAILQ_REMOVE(&vnic->filter, l2_filter,
+ bnxt_filter_info, next);
+ bnxt_free_filter(bp, l2_filter);
+ }
+ }
return 0;
}
HWRM_CHECK_RESULT();
filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
+ filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
HWRM_UNLOCK();
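+ /* Take a reference; bnxt_hwrm_clear_l2_filter() frees the filter
+ * only when the last reference is released.
+ */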
+ filter->l2_ref_cnt++;
+
return rc;
}
bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
- bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+ bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
- } else {
- bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
}
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
- bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
- else
- bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+ bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
+
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
+ bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
HWRM_UNLOCK();
if (bp->flags & BNXT_FLAG_REGISTERED)
return 0;
- flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
- if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+ if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+ flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
/* PFs and trusted VFs should indicate the support of the
ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
- if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
req.async_event_fwd[0] |=
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
req.async_event_fwd[1] |=
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
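+ /* Only PFs request forwarding of FW debug notification events. */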
+ if (BNXT_PF(bp))
+ req.async_event_fwd[1] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
flags = rte_le_to_cpu_32(resp->flags);
if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
- bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;
+ bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
HWRM_UNLOCK();
return rc;
}
-int bnxt_hwrm_ver_get(struct bnxt *bp)
+int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
{
int rc = 0;
struct hwrm_ver_get_input req = {.req_type = 0 };
uint32_t dev_caps_cfg;
bp->max_req_len = HWRM_MAX_REQ_LEN;
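+ /* Use the caller-supplied timeout until FW returns its own
+ * command timeout in the VER_GET response.
+ */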
+ bp->hwrm_cmd_timeout = timeout;
HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
/* convert timeout to usec */
bp->hwrm_cmd_timeout *= 1000;
if (!bp->hwrm_cmd_timeout)
- bp->hwrm_cmd_timeout = HWRM_CMD_TIMEOUT;
+ bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
rc = -ENOMEM;
goto error;
}
- rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+ rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"Unable to map response buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
}
- rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
bp->hwrm_short_cmd_req_dma_addr =
- rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
+ rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
rte_free(bp->hwrm_short_cmd_req_addr);
PMD_DRV_LOG(ERR,
return rc;
}
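+/* Scan TX CoS queues from the highest index down for one that
+ * advertises the LOSSY service profile.
+ */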
+static bool bnxt_find_lossy_profile(struct bnxt *bp)
+{
+ int i = 0;
+
+ for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+ if (bp->tx_cos_queue[i].profile ==
+ HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+ bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
+ return true;
+ }
+ }
+ return false;
+}
+
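+/* Fall back to the highest-indexed TX CoS queue whose profile and
+ * id are both valid (not UNKNOWN).
+ */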
+static void bnxt_find_first_valid_profile(struct bnxt *bp)
+{
+ int i = 0;
+
+ for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+ if (bp->tx_cos_queue[i].profile !=
+ HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
+ bp->tx_cos_queue[i].id !=
+ HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
+ bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
+ break;
+ }
+ }
+}
+
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
int rc = 0;
bp->tx_cos_queue[i].id;
}
} else {
- for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
- if (bp->tx_cos_queue[i].profile ==
- HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
- bp->tx_cosq_id[0] =
- bp->tx_cos_queue[i].id;
- break;
- }
- }
+ /* When CoS classification is disabled, for normal NIC
+ * operations, ideally we should use a LOSSY profile.
+ * If none is found, fall back to the first valid profile.
+ */
+ if (!bnxt_find_lossy_profile(bp))
+ bnxt_find_first_valid_profile(bp);
}
}
return 0;
}
-int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
int rc;
unsigned int i;
return rc;
}
-int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
uint16_t idx;
uint32_t rc = 0;
bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
}
-int bnxt_free_all_hwrm_rings(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
unsigned int i;
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
bp->max_resp_len = HWRM_MAX_RESP_LEN;
bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
- rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_addr == NULL)
return -ENOMEM;
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+ rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return 0;
}
-int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static int
+bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct bnxt_filter_info *filter;
int rc = 0;
rc = bnxt_hwrm_clear_em_filter(bp, filter);
else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
- else
- rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+ rc = bnxt_hwrm_clear_l2_filter(bp, filter);
STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
bnxt_free_filter(bp, filter);
}
rc = bnxt_hwrm_clear_em_filter(bp, filter);
else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
- else
- rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+ rc = bnxt_hwrm_clear_l2_filter(bp, filter);
STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
rte_free(flow);
return rc;
}
-void bnxt_free_tunnel_ports(struct bnxt *bp)
+static void
+bnxt_free_tunnel_ports(struct bnxt *bp)
{
if (bp->vxlan_port_cnt)
bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
/* FALLTHROUGH */
- bp->port_partition_type = resp->port_partition_type;
+ bp->flags |= BNXT_FLAG_NPAR_PF;
break;
default:
- bp->port_partition_type = 0;
+ bp->flags &= ~BNXT_FLAG_NPAR_PF;
break;
}
}
req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
- req.mtu = rte_cpu_to_le_16(RTE_MIN(bp->eth_dev->data->mtu,
- BNXT_MAX_MTU)); //FW adds hdr sizes
+ req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
}
}
-static void reserve_resources_from_vf(struct bnxt *bp,
- struct hwrm_func_cfg_input *cfg_req,
- int vf)
+static int reserve_resources_from_vf(struct bnxt *bp,
+ struct hwrm_func_cfg_input *cfg_req,
+ int vf)
{
struct hwrm_func_qcaps_input req = {0};
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
HWRM_UNLOCK();
+
+ return 0;
}
int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
req.req_buf_page_addr0 =
- rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
+ rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
buflen = dir_entries * entry_length;
buf = rte_malloc("nvm_dir", buflen, 0);
- rte_mem_lock_page(buf);
if (buf == NULL)
return -ENOMEM;
- dma_handle = rte_mem_virt2iova(buf);
+ dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
buf = rte_malloc("nvm_item", length, 0);
- rte_mem_lock_page(buf);
if (!buf)
return -ENOMEM;
- dma_handle = rte_mem_virt2iova(buf);
+ dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
uint8_t *buf;
buf = rte_malloc("nvm_write", data_len, 0);
- rte_mem_lock_page(buf);
if (!buf)
return -ENOMEM;
- dma_handle = rte_mem_virt2iova(buf);
+ dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
- req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
+ req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
HWRM_UNLOCK();
if (filter->fw_em_filter_id == UINT64_MAX)
return 0;
- PMD_DRV_LOG(ERR, "Clear EM filter\n");
HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
HWRM_CHECK_RESULT();
filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
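+ /* Cache the FW flow id returned for this ntuple filter. */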
+ filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
HWRM_UNLOCK();
return rc;
uint32_t flags;
int rc;
- if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
+ if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
return 0;
/* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
int rc;
/* Older FW does not have error recovery support */
- if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
+ if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
return 0;
if (!info) {