rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
return -ETIMEDOUT;
- PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
- req->req_type);
+ PMD_DRV_LOG(ERR,
+ "Error(timeout) sending msg 0x%04x, seq_id %d\n",
+ req->req_type, req->seq_id);
return -ETIMEDOUT;
}
return 0;
rc = -EINVAL; \
else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
rc = -ENOTSUP; \
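+	/* A hot reset is in progress; map to -EAGAIN so callers retry. */ \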
+ else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
+ rc = -EAGAIN; \
else if (rc > 0) \
rc = -EIO; \
return rc; \
rc = -EINVAL; \
else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
rc = -ENOTSUP; \
+ else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
+ rc = -EAGAIN; \
else if (rc > 0) \
rc = -EIO; \
return rc; \
HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
- req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+ req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
flags = rte_le_to_cpu_32(resp->flags);
if (BNXT_PF(bp)) {
- bp->pf.port_id = resp->port_id;
- bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
- bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
+ bp->pf->port_id = resp->port_id;
+ bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+ bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
new_max_vfs = bp->pdev->max_vfs;
- if (new_max_vfs != bp->pf.max_vfs) {
- if (bp->pf.vf_info)
- rte_free(bp->pf.vf_info);
- bp->pf.vf_info = rte_malloc("bnxt_vf_info",
- sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
- bp->pf.max_vfs = new_max_vfs;
+ if (new_max_vfs != bp->pf->max_vfs) {
+ if (bp->pf->vf_info)
+ rte_free(bp->pf->vf_info);
+ bp->pf->vf_info = rte_malloc("bnxt_vf_info",
+ sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
+ bp->pf->max_vfs = new_max_vfs;
for (i = 0; i < new_max_vfs; i++) {
- bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
- bp->pf.vf_info[i].vlan_table =
+ bp->pf->vf_info[i].fid =
+ bp->pf->first_vf_id + i;
+ bp->pf->vf_info[i].vlan_table =
rte_zmalloc("VF VLAN table",
getpagesize(),
getpagesize());
- if (bp->pf.vf_info[i].vlan_table == NULL)
+ if (bp->pf->vf_info[i].vlan_table == NULL)
PMD_DRV_LOG(ERR,
"Fail to alloc VLAN table for VF %d\n",
i);
else
rte_mem_lock_page(
- bp->pf.vf_info[i].vlan_table);
- bp->pf.vf_info[i].vlan_as_table =
+ bp->pf->vf_info[i].vlan_table);
+ bp->pf->vf_info[i].vlan_as_table =
rte_zmalloc("VF VLAN AS table",
getpagesize(),
getpagesize());
- if (bp->pf.vf_info[i].vlan_as_table == NULL)
+ if (bp->pf->vf_info[i].vlan_as_table == NULL)
PMD_DRV_LOG(ERR,
"Alloc VLAN AS table for VF %d fail\n",
i);
else
rte_mem_lock_page(
- bp->pf.vf_info[i].vlan_as_table);
- STAILQ_INIT(&bp->pf.vf_info[i].filter);
+ bp->pf->vf_info[i].vlan_as_table);
+ STAILQ_INIT(&bp->pf->vf_info[i].filter);
}
}
}
bp->fw_fid = rte_le_to_cpu_32(resp->fid);
- memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
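+	/* Adopt the FW-provided default MAC only when it is non-zero. */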
+ if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
+ bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
+ memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
+ } else {
+ bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
+ }
bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
bp->max_l2_ctx += bp->max_rx_em_flows;
/* TODO: For now, do not support VMDq/RFS on VFs. */
if (BNXT_PF(bp)) {
- if (bp->pf.max_vfs)
+ if (bp->pf->max_vfs)
bp->max_vnics = 1;
else
bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
} else {
bp->max_vnics = 1;
}
+	PMD_DRV_LOG(DEBUG, "Max l2_ctxs is %d, vnics is %d\n",
+		    bp->max_l2_ctx, bp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
if (BNXT_PF(bp)) {
- bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
if (BNXT_PF(bp)) {
req.enables |= rte_cpu_to_le_32(
HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
- memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
+ memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
RTE_MIN(sizeof(req.vf_req_fwd),
- sizeof(bp->pf.vf_req_fwd)));
+ sizeof(bp->pf->vf_req_fwd)));
/*
* PF can sniff HWRM API issued by VF. This can be set up by
dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
if (bp->max_resp_len != max_resp_len) {
- sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
+ sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function);
(dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
- sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
+ sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function);
PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
if (dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
- bp->flags |= BNXT_FLAG_ADV_FLOW_MGMT;
+ bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
}
+ if (dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
+ PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
+ bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
+ }
+
error:
HWRM_UNLOCK();
return rc;
if (conf->link_up) {
/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
- if (bp->link_info.auto_mode && conf->link_speed) {
+ if (bp->link_info->auto_mode && conf->link_speed) {
req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
}
struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc;
- req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
req.enables = rte_cpu_to_le_32(
HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- bp->pf.vf_info[vf].random_mac = false;
+ bp->pf->vf_info[vf].random_mac = false;
return rc;
}
}
int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
- struct rte_eth_stats *stats)
+ struct rte_eth_stats *stats,
+ struct hwrm_func_qstats_output *func_qstats)
{
int rc = 0;
struct hwrm_func_qstats_input req = {.req_type = 0};
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
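+	/* Optionally hand the raw FW stats response back to the caller. */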
+ if (func_qstats)
+ memcpy(func_qstats, resp,
+ sizeof(struct hwrm_func_qstats_output));
+
+ if (!stats)
+ goto exit;
stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
+exit:
HWRM_UNLOCK();
return rc;
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].rx_fw_ring_id =
INVALID_HW_RING_ID;
- memset(rxr->rx_desc_ring, 0,
- rxr->rx_ring_struct->ring_size *
- sizeof(*rxr->rx_desc_ring));
- memset(rxr->rx_buf_ring, 0,
- rxr->rx_ring_struct->ring_size *
- sizeof(*rxr->rx_buf_ring));
- rxr->rx_prod = 0;
}
ring = rxr->ag_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
BNXT_CHIP_THOR(bp) ?
HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
HWRM_RING_FREE_INPUT_RING_TYPE_RX);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- memset(rxr->ag_buf_ring, 0,
- rxr->ag_ring_struct->ring_size *
- sizeof(*rxr->ag_buf_ring));
- rxr->ag_prod = 0;
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].ag_fw_ring_id =
INVALID_HW_RING_ID;
struct rte_pci_device *pdev = bp->pdev;
char type[RTE_MEMZONE_NAMESIZE];
- sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
+ sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
bp->max_resp_len = HWRM_MAX_RESP_LEN;
bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
break;
+ case ETH_LINK_SPEED_200G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_200GB;
+ break;
default:
PMD_DRV_LOG(ERR,
"Unsupported link speed %d; default to AUTO\n",
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
- ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
+ ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
+ ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
-static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
+static int bnxt_validate_link_speed(struct bnxt *bp)
{
+ uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
+ uint16_t port_id = bp->eth_dev->data->port_id;
+ uint32_t link_speed_capa;
uint32_t one_speed;
if (link_speed == ETH_LINK_SPEED_AUTONEG)
return 0;
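+	/* Check the requested speeds against what the port supports. */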
+ link_speed_capa = bnxt_get_speed_capabilities(bp);
+
if (link_speed & ETH_LINK_SPEED_FIXED) {
one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
link_speed, port_id);
return -EINVAL;
}
- if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
+ if ((one_speed & link_speed_capa) != one_speed) {
PMD_DRV_LOG(ERR,
"Unsupported advertised speed (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
} else {
- if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
+ if (!(link_speed & link_speed_capa)) {
PMD_DRV_LOG(ERR,
"Unsupported advertised speeds (%u) for port %u\n",
link_speed, port_id);
uint16_t ret = 0;
if (link_speed == ETH_LINK_SPEED_AUTONEG) {
- if (bp->link_info.support_speeds)
- return bp->link_info.support_speeds;
+ if (bp->link_info->support_speeds)
+ return bp->link_info->support_speeds;
link_speed = BNXT_SUPPORTED_SPEEDS;
}
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
if (link_speed & ETH_LINK_SPEED_100G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
+ if (link_speed & ETH_LINK_SPEED_200G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_200GB;
return ret;
}
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
eth_link_speed = ETH_SPEED_NUM_100G;
break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
+ eth_link_speed = ETH_SPEED_NUM_200G;
+ break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
int rc = 0;
- struct bnxt_link_info *link_info = &bp->link_info;
+ struct bnxt_link_info *link_info = bp->link_info;
rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
if (rc) {
if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
return 0;
- rc = bnxt_valid_link_speed(dev_conf->link_speeds,
- bp->eth_dev->data->port_id);
+ rc = bnxt_validate_link_speed(bp);
if (rc)
goto error;
*/
if (autoneg == 1 &&
!(!BNXT_CHIP_THOR(bp) &&
- (bp->link_info.auto_link_speed ||
- bp->link_info.force_link_speed))) {
+ (bp->link_info->auto_link_speed ||
+ bp->link_info->force_link_speed))) {
link_req.phy_flags |=
HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
link_req.auto_link_speed_mask =
bnxt_parse_eth_link_speed_mask(bp,
dev_conf->link_speeds);
} else {
- if (bp->link_info.phy_type ==
+ if (bp->link_info->phy_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
- bp->link_info.phy_type ==
+ bp->link_info->phy_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
- bp->link_info.media_type ==
+ bp->link_info->media_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
return -EINVAL;
/* If user wants a particular speed try that first. */
if (speed)
link_req.link_speed = speed;
- else if (bp->link_info.force_link_speed)
- link_req.link_speed = bp->link_info.force_link_speed;
+ else if (bp->link_info->force_link_speed)
+ link_req.link_speed = bp->link_info->force_link_speed;
else
- link_req.link_speed = bp->link_info.auto_link_speed;
+ link_req.link_speed = bp->link_info->auto_link_speed;
}
link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
- link_req.auto_pause = bp->link_info.auto_pause;
- link_req.force_pause = bp->link_info.force_pause;
+ link_req.auto_pause = bp->link_info->auto_pause;
+ link_req.force_pause = bp->link_info->force_pause;
port_phy_cfg:
rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
uint16_t flags;
int rc = 0;
+	uint16_t svif_info;
+
+	bp->func_svif = BNXT_SVIF_INVALID;
HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
/* Hard Coded.. 0xfff VLAN ID mask */
bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
+
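+	/* The function SVIF is valid only when the SVIF_VALID bit is set. */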
+ svif_info = rte_le_to_cpu_16(resp->svif_info);
+ if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
+ bp->func_svif = svif_info &
+ HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
+
flags = rte_le_to_cpu_16(resp->flags);
if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
return rc;
}
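+/*
+ * Query the port-level source virtual interface (SVIF) via
+ * HWRM_PORT_MAC_QCFG. VFs skip the query and leave bp->port_svif
+ * as BNXT_SVIF_INVALID.
+ */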
+int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
+{
+ struct hwrm_port_mac_qcfg_input req = {0};
+ struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t port_svif_info;
+ int rc;
+
+ bp->port_svif = BNXT_SVIF_INVALID;
+
+ if (!BNXT_PF(bp))
+ return 0;
+
+ HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+
+ port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
+ if (port_svif_info &
+ HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
+ bp->port_svif = port_svif_info &
+ HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
+
+ HWRM_UNLOCK();
+
+ return 0;
+}
+
static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
struct hwrm_func_qcaps_output *qcaps)
{
req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
}
- req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+ req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
cfg_req->enables |=
rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
rte_eth_random_addr(cfg_req->dflt_mac_addr);
- bp->pf.vf_info[vf].random_mac = true;
+ bp->pf->vf_info[vf].random_mac = true;
} else {
memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
RTE_ETHER_ADDR_LEN);
/* Get the actual allocated values now */
HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc) {
/* Check for zero MAC address */
HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
rc = rte_le_to_cpu_16(resp->vlan);
/* Only TX ring value reflects actual allocation? TODO */
bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
- bp->pf.evb_mode = resp->evb_mode;
+ bp->pf->evb_mode = resp->evb_mode;
HWRM_UNLOCK();
if (rc)
return rc;
- bp->pf.func_cfg_flags &=
+ bp->pf->func_cfg_flags &=
~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
- bp->pf.func_cfg_flags |=
+ bp->pf->func_cfg_flags |=
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
rc = __bnxt_hwrm_func_qcaps(bp);
if (rc)
return rc;
- bp->pf.active_vfs = num_vfs;
+ bp->pf->active_vfs = num_vfs;
/*
* First, configure the PF to only use one TX ring. This ensures that
*
* This has been fixed with firmware versions above 20.6.54
*/
- bp->pf.func_cfg_flags &=
+ bp->pf->func_cfg_flags &=
~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
- bp->pf.func_cfg_flags |=
+ bp->pf->func_cfg_flags |=
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
rc = bnxt_hwrm_pf_func_cfg(bp, 1);
if (rc)
* Now, create and register a buffer to hold forwarded VF requests
*/
req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
- bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
+ bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
- if (bp->pf.vf_req_buf == NULL) {
+ if (bp->pf->vf_req_buf == NULL) {
rc = -ENOMEM;
goto error_free;
}
for (sz = 0; sz < req_buf_sz; sz += getpagesize())
- rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
+ rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
for (i = 0; i < num_vfs; i++)
- bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
+ bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
(i * HWRM_MAX_REQ_LEN);
rc = bnxt_hwrm_func_buf_rgtr(bp);
populate_vf_func_cfg_req(bp, &req, num_vfs);
- bp->pf.active_vfs = 0;
+ bp->pf->active_vfs = 0;
for (i = 0; i < num_vfs; i++) {
add_random_mac_if_needed(bp, &req, i);
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
- req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
+ req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
rc = bnxt_hwrm_send_message(bp,
&req,
sizeof(req),
HWRM_UNLOCK();
reserve_resources_from_vf(bp, &req, i);
- bp->pf.active_vfs++;
- bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
+ bp->pf->active_vfs++;
+ bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
}
/*
req.fid = rte_cpu_to_le_16(0xffff);
req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
- req.evb_mode = bp->pf.evb_mode;
+ req.evb_mode = bp->pf->evb_mode;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
req.flags = rte_cpu_to_le_32(flags);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
req.req_buf_num_pages = rte_cpu_to_le_16(1);
req.req_buf_page_size = rte_cpu_to_le_16(
- page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
+ page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
req.req_buf_page_addr0 =
- rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf));
+ rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
- req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+ req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
req.enables = rte_cpu_to_le_32(
HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = rte_cpu_to_le_16(
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
if (is_vf) {
- dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
- fid = bp->pf.vf_info[vf].fid;
- func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
+ dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
+ fid = bp->pf->vf_info[vf].fid;
+ func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
} else {
fid = rte_cpu_to_le_16(0xffff);
- func_cfg_flags = bp->pf.func_cfg_flags;
+ func_cfg_flags = bp->pf->func_cfg_flags;
dflt_vlan = bp->vlan;
}
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(enables);
- req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
req.max_bw = rte_cpu_to_le_32(max_bw);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
- req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
- req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
+ req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
{
struct hwrm_port_qstats_input req = {0};
struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
- struct bnxt_pf_info *pf = &bp->pf;
+ struct bnxt_pf_info *pf = bp->pf;
int rc;
HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
{
struct hwrm_port_clr_stats_input req = {0};
struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
- struct bnxt_pf_info *pf = &bp->pf;
+ struct bnxt_pf_info *pf = bp->pf;
int rc;
/* Not allowed on NS2 device, NPAR, MultiHost, VF */
return 0;
HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
- req.port_id = bp->pf.port_id;
+ req.port_id = bp->pf->port_id;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
unsigned int i;
- bp->num_leds = resp->num_leds;
+ bp->leds->num_leds = resp->num_leds;
memcpy(bp->leds, &resp->led0_id,
- sizeof(bp->leds[0]) * bp->num_leds);
- for (i = 0; i < bp->num_leds; i++) {
+ sizeof(bp->leds[0]) * bp->leds->num_leds);
+ for (i = 0; i < bp->leds->num_leds; i++) {
struct bnxt_led_info *led = &bp->leds[i];
uint16_t caps = led->led_state_caps;
if (!led->led_group_id ||
!BNXT_LED_ALT_BLINK_CAP(caps)) {
- bp->num_leds = 0;
+ bp->leds->num_leds = 0;
break;
}
}
uint16_t duration = 0;
int rc, i;
- if (!bp->num_leds || BNXT_VF(bp))
+ if (!bp->leds->num_leds || BNXT_VF(bp))
return -EOPNOTSUPP;
HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
duration = rte_cpu_to_le_16(500);
}
- req.port_id = bp->pf.port_id;
- req.num_leds = bp->num_leds;
+ req.port_id = bp->pf->port_id;
+ req.num_leds = bp->leds->num_leds;
led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
- for (i = 0; i < bp->num_leds; i++, led_cfg++) {
+ for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
req.enables |= BNXT_LED_DFLT_ENABLES(i);
led_cfg->led_id = bp->leds[i].led_id;
led_cfg->led_state = led_state;
/* First query all VNIC ids */
HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
- req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
- req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
+ req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
+ req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
size_t sz;
/* First query all VNIC ids */
- vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+ vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
RTE_CACHE_LINE_SIZE);
if (vnic_ids == NULL)
for (i = 0; i < num_vnic_ids; i++) {
memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
- rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
+ rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
if (rc)
break;
if (vnic.mru <= 4) /* Indicates unallocated */
HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
- req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(
HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
req.vlan_antispoof_mode = on ?
size_t sz;
int rc;
- vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+ vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
RTE_CACHE_LINE_SIZE);
if (vnic_ids == NULL)
memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
- bp->pf.first_vf_id + vf);
+ bp->pf->first_vf_id + vf);
if (rc)
goto exit;
if (vnic.func_default) {
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
int total_alloc_len;
- int rc, i;
+ int rc, i, tqm_rings;
if (!BNXT_CHIP_THOR(bp) ||
bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
goto ctx_err;
}
- ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
- sizeof(*ctx_pg) * BNXT_MAX_Q,
- RTE_CACHE_LINE_SIZE);
- if (!ctx_pg) {
- rc = -ENOMEM;
- goto ctx_err;
- }
- for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
-
- bp->ctx = ctx;
ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
ctx->qp_min_qp1_entries =
rte_le_to_cpu_16(resp->qp_min_qp1_entries);
ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
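+	/*
+	 * Newer FW reports the fast-path TQM ring count; older FW returns
+	 * zero, so fall back to bp->max_q. Reserve one extra entry for the
+	 * slow-path TQM ring.
+	 */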
+ ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
+
+ if (!ctx->tqm_fp_rings_count)
+ ctx->tqm_fp_rings_count = bp->max_q;
+
+ tqm_rings = ctx->tqm_fp_rings_count + 1;
+
+ ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
+ sizeof(*ctx_pg) * tqm_rings,
+ RTE_CACHE_LINE_SIZE);
+ if (!ctx_pg) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ for (i = 0; i < tqm_rings; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+
+ bp->ctx = ctx;
ctx_err:
HWRM_UNLOCK();
return rc;
{
struct hwrm_port_qstats_ext_input req = {0};
struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
- struct bnxt_pf_info *pf = &bp->pf;
+ struct bnxt_pf_info *pf = bp->pf;
int rc;
if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
HWRM_CHECK_RESULT();
- memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
HWRM_UNLOCK();
return rc;
if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
return 0;
- if (!info) {
- info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
- sizeof(*info), 0);
- bp->recovery_info = info;
- if (info == NULL)
- return -ENOMEM;
- } else {
- memset(info, 0, sizeof(*info));
- }
-
HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
}
req.flags = rte_cpu_to_le_32(flags);
- req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+ req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
uint32_t flags = 0;
int rc = 0;
- if (!(bp->flags & BNXT_FLAG_ADV_FLOW_MGMT))
+ if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT))
return rc;
if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
return rc;
}
+
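+/* Query the maximum number of CFA flow counters supported by the FW. */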
+int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
+{
+ int rc = 0;
+
+ struct hwrm_cfa_counter_qcaps_input req = {0};
+ struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
+ req.target_id = rte_cpu_to_le_16(bp->fw_fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+ HWRM_CHECK_RESULT();
+ if (max_fc)
+ *max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
+ HWRM_UNLOCK();
+
+ return 0;
+}
+
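+/*
+ * Register a DMA-able buffer with the FW for CFA flow counters and
+ * return the FW-assigned context id.
+ */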
+int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
+{
+ int rc = 0;
+ struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
+ struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
+
+ req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
+ req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
+ req.page_dir = rte_cpu_to_le_64(dma_addr);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+ HWRM_CHECK_RESULT();
+ if (ctx_id) {
+ *ctx_id = rte_le_to_cpu_16(resp->ctx_id);
+ PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
+ }
+ HWRM_UNLOCK();
+
+ return 0;
+}
+
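+/* Release a CFA counter context previously registered with the FW. */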
+int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
+{
+ int rc = 0;
+ struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
+ struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
+
+ req.ctx_id = rte_cpu_to_le_16(ctx_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
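+/* Enable or disable pull-mode CFA flow counters for one direction. */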
+int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
+ uint16_t cntr, uint16_t ctx_id,
+ uint32_t num_entries, bool enable)
+{
+ struct hwrm_cfa_counter_cfg_input req = {0};
+ struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t flags = 0;
+ int rc;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
+
+ req.target_id = rte_cpu_to_le_16(bp->fw_fid);
+ req.counter_type = rte_cpu_to_le_16(cntr);
+ flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
+ HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
+ flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
+ if (dir == BNXT_DIR_RX)
+ flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
+ else if (dir == BNXT_DIR_TX)
+ flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
+ req.flags = rte_cpu_to_le_16(flags);
+ req.ctx_id = rte_cpu_to_le_16(ctx_id);
+ req.num_entries = rte_cpu_to_le_32(num_entries);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return 0;
+}
+
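+/*
+ * Trigger a DMA of the latest flow counter values into the counter
+ * table registered for the given direction.
+ */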
+int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
+ enum bnxt_flow_dir dir,
+ uint16_t cntr,
+ uint16_t num_entries)
+{
+ struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_counter_qstats_input req = {0};
+ uint16_t flow_ctx_id = 0;
+ uint16_t flags = 0;
+ int rc = 0;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ if (dir == BNXT_DIR_RX) {
+ flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
+ flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
+ } else if (dir == BNXT_DIR_TX) {
+ flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
+ flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
+ req.target_id = rte_cpu_to_le_16(bp->fw_fid);
+ req.counter_type = rte_cpu_to_le_16(cntr);
+ req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
+ req.num_entries = rte_cpu_to_le_16(num_entries);
+ req.flags = rte_cpu_to_le_16(flags);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return 0;
+}