{ .vendor_id = 0, /* sentinel */ },
};
-#define BNXT_DEVARG_ACCUM_STATS "accum-stats"
#define BNXT_DEVARG_FLOW_XSTAT "flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows"
#define BNXT_DEVARG_REPRESENTOR "representor"
static const char *const bnxt_dev_args[] = {
BNXT_DEVARG_REPRESENTOR,
- BNXT_DEVARG_ACCUM_STATS,
BNXT_DEVARG_FLOW_XSTAT,
BNXT_DEVARG_MAX_NUM_KFLOWS,
BNXT_DEVARG_REP_BASED_PF,
NULL
};
-/*
- * accum-stats == false to disable flow counter accumulation
- * accum-stats == true to enable flow counter accumulation
- */
-#define BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats) ((accum_stats) > 1)
-
/*
* app-id = an non-negative 8-bit number
*/
if (rc)
goto alloc_mem_err;
- rc = bnxt_alloc_vnic_attributes(bp);
+ rc = bnxt_alloc_vnic_attributes(bp, reconfig);
if (rc)
goto alloc_mem_err;
if (BNXT_CHIP_P5(bp))
bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
- rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
- if (rc) {
- PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
- goto err_out;
- }
-
rc = bnxt_alloc_hwrm_rings(bp);
if (rc) {
PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
}
}
- /* default vnic 0 */
- rc = bnxt_setup_one_vnic(bp, 0);
- if (rc)
- goto err_out;
/* VNIC configuration */
- if (BNXT_RFS_NEEDS_VNIC(bp)) {
- for (i = 1; i < bp->nr_vnics; i++) {
- rc = bnxt_setup_one_vnic(bp, i);
- if (rc)
- goto err_out;
- }
+ for (i = 0; i < bp->nr_vnics; i++) {
+ rc = bnxt_setup_one_vnic(bp, i);
+ if (rc)
+ goto err_out;
}
for (j = 0; j < bp->tx_nr_rings; j++) {
return rc;
/* MAC Specifics */
- dev_info->max_mac_addrs = bp->max_l2_ctx;
+ dev_info->max_mac_addrs = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR);
dev_info->max_hash_mac_addrs = 0;
/* PF/VF specifics */
dev_info->min_rx_bufsize = 1;
dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
- dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
- if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
- dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
- if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
- dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa = bnxt_get_rx_port_offloads(bp);
dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
- dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
+ dev_info->tx_offload_capa = bnxt_get_tx_port_offloads(bp) |
dev_info->tx_queue_offload_capa;
- if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
- dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+ dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
{
struct bnxt *bp = eth_dev->data->dev_private;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+ struct rte_eth_rss_conf *rss_conf = ð_dev->data->dev_conf.rx_adv_conf.rss_conf;
int rc;
bp->rx_queues = (void *)eth_dev->data->rx_queues;
rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
+ /* application provides the hash key to program */
+ if (rss_conf->rss_key != NULL) {
+ if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE)
+ PMD_DRV_LOG(WARNING, "port %u RSS key len must be %d bytes long",
+ eth_dev->data->port_id, HW_HASH_KEY_SIZE);
+ else
+ memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE);
+ }
+ bp->rss_conf.rss_key_len = HW_HASH_KEY_SIZE;
+ bp->rss_conf.rss_hf = rss_conf->rss_hf;
+
bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
return 0;
struct rte_eth_link *link = ð_dev->data->dev_link;
if (link->link_status)
- PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
+ PMD_DRV_LOG(DEBUG, "Port %d Link Up - speed %u Mbps - %s\n",
eth_dev->data->port_id,
(uint32_t)link->link_speed,
(link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
eth_dev->data->dev_started = 0;
/* Prevent crashes when queues are still in use */
- eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
- eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
+ bnxt_stop_rxtx(eth_dev);
bnxt_disable_int(bp);
}
/* Unload the driver, release resources */
-static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
+int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
return bnxt_dev_stop(eth_dev);
}
-static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
+int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
bnxt_cancel_fc_thread(bp);
+ rte_eal_alarm_cancel(bnxt_handle_vf_cfg_change, (void *)bp);
if (eth_dev->data->dev_started)
ret = bnxt_dev_stop(eth_dev);
return -EINVAL;
}
- bp->flags |= BNXT_FLAG_UPDATE_HASH;
- memcpy(ð_dev->data->dev_conf.rx_adv_conf.rss_conf,
- rss_conf,
- sizeof(*rss_conf));
-
/* Update the default RSS VNIC(s) */
vnic = BNXT_GET_DEFAULT_VNIC(bp);
vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
+ /* Cache the hash function */
+ bp->rss_conf.rss_hf = rss_conf->rss_hf;
+
/*
* If hashkey is not specified, use the previously configured
* hashkey
}
memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
+ /* Cache the hash key */
+ memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE);
+
rss_config:
rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
return rc;
uint32_t nb_mc_addr)
{
struct bnxt *bp = eth_dev->data->dev_private;
- char *mc_addr_list = (char *)mc_addr_set;
struct bnxt_vnic_info *vnic;
- uint32_t off = 0, i = 0;
+ uint32_t i = 0;
int rc;
rc = is_bnxt_in_error(bp);
vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ bp->nb_mc_addr = nb_mc_addr;
+
if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
goto allmulti;
/* TODO Check for Duplicate mcast addresses */
vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
- for (i = 0; i < nb_mc_addr; i++) {
- memcpy(vnic->mc_list + off, &mc_addr_list[i],
- RTE_ETHER_ADDR_LEN);
- off += RTE_ETHER_ADDR_LEN;
- }
+ for (i = 0; i < nb_mc_addr; i++)
+ rte_ether_addr_copy(&mc_addr_set[i], &bp->mcast_addr_list[i]);
- vnic->mc_addr_cnt = i;
- if (vnic->mc_addr_cnt)
+ if (bp->nb_mc_addr)
vnic->flags |= BNXT_VNIC_INFO_MCAST;
else
vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
return 0;
}
+/* Re-program the cached multicast MAC list after a reset recovery. */
+static int bnxt_restore_mcast_mac_filters(struct bnxt *bp)
+{
+	int ret = 0;
+
+	ret = bnxt_dev_set_mc_addr_list_op(bp->eth_dev, bp->mcast_addr_list,
+					   bp->nb_mc_addr);
+	if (ret)
+		PMD_DRV_LOG(ERR, "Failed to restore multicast MAC addresses\n");
+
+	return ret;
+}
+
static int bnxt_restore_filters(struct bnxt *bp)
{
struct rte_eth_dev *dev = bp->eth_dev;
if (ret)
return ret;
+ /* if vlans are already programmed, this can fail with -EEXIST */
ret = bnxt_restore_vlan_filters(bp);
- /* TODO restore other filters as well */
+ if (ret && ret != -EEXIST)
+ return ret;
+
+ ret = bnxt_restore_mcast_mac_filters(bp);
+ if (ret)
+ return ret;
+
return ret;
}
/* Clear Error flag so that device re-init should happen */
bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
+ PMD_DRV_LOG(INFO, "Port: %u Starting recovery...\n",
+ bp->eth_dev->data->port_id);
rc = bnxt_check_fw_ready(bp);
if (rc)
if (rc)
goto err_start;
- PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
+ rte_eth_fp_ops[bp->eth_dev->data->port_id].rx_pkt_burst =
+ bp->eth_dev->rx_pkt_burst;
+ rte_eth_fp_ops[bp->eth_dev->data->port_id].tx_pkt_burst =
+ bp->eth_dev->tx_pkt_burst;
+ rte_mb();
+
+ PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n",
+ bp->eth_dev->data->port_id);
pthread_mutex_unlock(&bp->err_recovery_lock);
return;
RTE_ETH_EVENT_INTR_RMV,
NULL);
pthread_mutex_unlock(&bp->err_recovery_lock);
- PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
+ PMD_DRV_LOG(ERR, "Port %u: Failed to recover from FW reset\n",
+ bp->eth_dev->data->port_id);
}
void bnxt_dev_reset_and_resume(void *arg)
int rc;
bnxt_dev_cleanup(bp);
+ PMD_DRV_LOG(INFO, "Port: %u Finished bnxt_dev_cleanup\n",
+ bp->eth_dev->data->port_id);
bnxt_wait_for_device_shutdown(bp);
rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp);
if (rc)
- PMD_DRV_LOG(ERR, "Error setting recovery alarm");
+ PMD_DRV_LOG(ERR, "Port %u: Error setting recovery alarm",
+ bp->eth_dev->data->port_id);
}
uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
bp->flags |= BNXT_FLAG_FATAL_ERROR;
bp->flags |= BNXT_FLAG_FW_RESET;
- bnxt_stop_rxtx(bp);
+ bnxt_stop_rxtx(bp->eth_dev);
PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
+ size_t max_mac_addr = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR);
int rc = 0;
+ if (bp->max_l2_ctx > RTE_ETH_NUM_RECEIVE_MAC_ADDR)
+ PMD_DRV_LOG(INFO, "Max number of MAC addrs supported is %d, but will be limited to %d\n",
+ bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR);
+
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
- RTE_ETHER_ADDR_LEN *
- bp->max_l2_ctx,
+ RTE_ETHER_ADDR_LEN * max_mac_addr,
0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
/* Copy the permanent MAC from the FUNC_QCAPS response */
memcpy(ð_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
+ /*
+ * Allocate memory to hold multicast mac addresses added.
+ * Used to restore them during reset recovery
+ */
+ bp->mcast_addr_list = rte_zmalloc("bnxt_mcast_addr_tbl",
+ sizeof(struct rte_ether_addr) *
+ BNXT_MAX_MC_ADDRS, 0);
+ if (bp->mcast_addr_list == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate multicast addr table\n");
+ return -ENOMEM;
+ }
+ bp->mc_list_dma_addr = rte_malloc_virt2iova(bp->mcast_addr_list);
+ if (bp->mc_list_dma_addr == RTE_BAD_IOVA) {
+ PMD_DRV_LOG(ERR, "Fail to map mcast_addr_list to physical memory\n");
+ return -ENOMEM;
+ }
+
return rc;
}
BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
}
-struct bnxt *
-bnxt_get_bp(uint16_t port)
-{
- struct bnxt *bp;
- struct rte_eth_dev *dev;
-
- if (!rte_eth_dev_is_valid_port(port)) {
- PMD_DRV_LOG(ERR, "Invalid port %d\n", port);
- return NULL;
- }
-
- dev = &rte_eth_devices[port];
- if (!is_bnxt_supported(dev)) {
- PMD_DRV_LOG(ERR, "Device %d not supported\n", port);
- return NULL;
- }
-
- bp = (struct bnxt *)dev->data->dev_private;
- if (!BNXT_TRUFLOW_EN(bp)) {
- PMD_DRV_LOG(ERR, "TRUFLOW not enabled\n");
- return NULL;
- }
-
- return bp;
-}
-
-uint16_t
-bnxt_get_svif(uint16_t port_id, bool func_svif,
- enum bnxt_ulp_intf_type type)
-{
- struct rte_eth_dev *eth_dev;
- struct bnxt *bp;
-
- eth_dev = &rte_eth_devices[port_id];
- if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
- struct bnxt_representor *vfr = eth_dev->data->dev_private;
- if (!vfr)
- return 0;
-
- if (type == BNXT_ULP_INTF_TYPE_VF_REP)
- return vfr->svif;
-
- eth_dev = vfr->parent_dev;
- }
-
- bp = eth_dev->data->dev_private;
-
- return func_svif ? bp->func_svif : bp->port_svif;
-}
-
-void
-bnxt_get_iface_mac(uint16_t port, enum bnxt_ulp_intf_type type,
- uint8_t *mac, uint8_t *parent_mac)
-{
- struct rte_eth_dev *eth_dev;
- struct bnxt *bp;
-
- if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF &&
- type != BNXT_ULP_INTF_TYPE_PF)
- return;
-
- eth_dev = &rte_eth_devices[port];
- bp = eth_dev->data->dev_private;
- memcpy(mac, bp->mac_addr, RTE_ETHER_ADDR_LEN);
-
- if (type == BNXT_ULP_INTF_TYPE_TRUSTED_VF)
- memcpy(parent_mac, bp->parent->mac_addr, RTE_ETHER_ADDR_LEN);
-}
-
-uint16_t
-bnxt_get_parent_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
-{
- struct rte_eth_dev *eth_dev;
- struct bnxt *bp;
-
- if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF)
- return 0;
-
- eth_dev = &rte_eth_devices[port];
- bp = eth_dev->data->dev_private;
-
- return bp->parent->vnic;
-}
-uint16_t
-bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
-{
- struct rte_eth_dev *eth_dev;
- struct bnxt_vnic_info *vnic;
- struct bnxt *bp;
-
- eth_dev = &rte_eth_devices[port];
- if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
- struct bnxt_representor *vfr = eth_dev->data->dev_private;
- if (!vfr)
- return 0;
-
- if (type == BNXT_ULP_INTF_TYPE_VF_REP)
- return vfr->dflt_vnic_id;
-
- eth_dev = vfr->parent_dev;
- }
-
- bp = eth_dev->data->dev_private;
-
- vnic = BNXT_GET_DEFAULT_VNIC(bp);
-
- return vnic->fw_vnic_id;
-}
-
-uint16_t
-bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type)
-{
- struct rte_eth_dev *eth_dev;
- struct bnxt *bp;
-
- eth_dev = &rte_eth_devices[port];
- if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
- struct bnxt_representor *vfr = eth_dev->data->dev_private;
- if (!vfr)
- return 0;
-
- if (type == BNXT_ULP_INTF_TYPE_VF_REP)
- return vfr->fw_fid;
-
- eth_dev = vfr->parent_dev;
- }
-
- bp = eth_dev->data->dev_private;
-
- return bp->fw_fid;
-}
-
-enum bnxt_ulp_intf_type
-bnxt_get_interface_type(uint16_t port)
-{
- struct rte_eth_dev *eth_dev;
- struct bnxt *bp;
-
- eth_dev = &rte_eth_devices[port];
- if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev))
- return BNXT_ULP_INTF_TYPE_VF_REP;
-
- bp = eth_dev->data->dev_private;
- if (BNXT_PF(bp))
- return BNXT_ULP_INTF_TYPE_PF;
- else if (BNXT_VF_IS_TRUSTED(bp))
- return BNXT_ULP_INTF_TYPE_TRUSTED_VF;
- else if (BNXT_VF(bp))
- return BNXT_ULP_INTF_TYPE_VF;
-
- return BNXT_ULP_INTF_TYPE_INVALID;
-}
-
-uint16_t
-bnxt_get_phy_port_id(uint16_t port_id)
-{
- struct bnxt_representor *vfr;
- struct rte_eth_dev *eth_dev;
- struct bnxt *bp;
-
- eth_dev = &rte_eth_devices[port_id];
- if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
- vfr = eth_dev->data->dev_private;
- if (!vfr)
- return 0;
-
- eth_dev = vfr->parent_dev;
- }
-
- bp = eth_dev->data->dev_private;
-
- return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id;
-}
-
-uint16_t
-bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type)
-{
- struct rte_eth_dev *eth_dev;
- struct bnxt *bp;
-
- eth_dev = &rte_eth_devices[port_id];
- if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
- struct bnxt_representor *vfr = eth_dev->data->dev_private;
- if (!vfr)
- return 0;
-
- if (type == BNXT_ULP_INTF_TYPE_VF_REP)
- return vfr->fw_fid - 1;
-
- eth_dev = vfr->parent_dev;
- }
-
- bp = eth_dev->data->dev_private;
-
- return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1;
-}
-
-uint16_t
-bnxt_get_vport(uint16_t port_id)
-{
- return (1 << bnxt_get_phy_port_id(port_id));
-}
-
static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
{
struct bnxt_error_recovery_info *info = bp->recovery_info;
if (rc)
return rc;
- rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
- if (rc)
- return rc;
-
bnxt_hwrm_port_mac_qcfg(bp);
bnxt_hwrm_parent_pf_qcfg(bp);
}
}
+ if (!reconfig_dev) {
+ bp->rss_conf.rss_key = rte_zmalloc("bnxt_rss_key",
+ HW_HASH_KEY_SIZE, 0);
+ if (bp->rss_conf.rss_key == NULL) {
+ PMD_DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory",
+ bp->eth_dev->data->port_id);
+ return -ENOMEM;
+ }
+ }
+
rc = bnxt_alloc_mem(bp, reconfig_dev);
if (rc)
return rc;
return 0;
}
-static int
-bnxt_parse_devarg_accum_stats(__rte_unused const char *key,
- const char *value, void *opaque_arg)
-{
- struct bnxt *bp = opaque_arg;
- unsigned long accum_stats;
- char *end = NULL;
-
- if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
- "Invalid parameter passed to accum-stats devargs.\n");
- return -EINVAL;
- }
-
- accum_stats = strtoul(value, &end, 10);
- if (end == NULL || *end != '\0' ||
- (accum_stats == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
- "Invalid parameter passed to accum-stats devargs.\n");
- return -EINVAL;
- }
-
- if (BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats)) {
- PMD_DRV_LOG(ERR,
- "Invalid value passed to accum-stats devargs.\n");
- return -EINVAL;
- }
-
- if (accum_stats) {
- bp->flags2 |= BNXT_FLAGS2_ACCUM_STATS_EN;
- PMD_DRV_LOG(INFO, "Host-based accum-stats feature enabled.\n");
- } else {
- bp->flags2 &= ~BNXT_FLAGS2_ACCUM_STATS_EN;
- PMD_DRV_LOG(INFO, "Host-based accum-stats feature disabled.\n");
- }
-
- return 0;
-}
-
static int
bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
const char *value, void *opaque_arg)
if (ret)
goto err;
- /*
- * Handler for "accum-stats" devarg.
- * Invoked as for ex: "-a 0000:00:0d.0,accum-stats=1"
- */
- rte_kvargs_process(kvlist, BNXT_DEVARG_ACCUM_STATS,
- bnxt_parse_devarg_accum_stats, bp);
/*
* Handler for "max_num_kflows" devarg.
* Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32"
if (!ctx)
return;
- if (ctx->va)
- rte_free(ctx->va);
+ rte_free(ctx->va);
ctx->va = NULL;
ctx->dma = RTE_BAD_IOVA;
if (!reconfig_dev) {
bnxt_free_hwrm_resources(bp);
bnxt_free_error_recovery_info(bp);
+ rte_free(bp->mcast_addr_list);
+ bp->mcast_addr_list = NULL;
+ rte_free(bp->rss_conf.rss_key);
+ bp->rss_conf.rss_key = NULL;
}
bnxt_uninit_ctx_mem(bp);
return 0;
bp->rep_info = rte_zmalloc("bnxt_rep_info",
- sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
+ sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS(bp),
0);
if (!bp->rep_info) {
PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
{
struct rte_eth_dev *vf_rep_eth_dev;
char name[RTE_ETH_NAME_MAX_LEN];
- struct bnxt *backing_bp;
+ struct bnxt *backing_bp = backing_eth_dev->data->dev_private;
+ uint16_t max_vf_reps = BNXT_MAX_VF_REPS(backing_bp);
+
uint16_t num_rep;
int i, ret = 0;
struct rte_kvargs *kvlist = NULL;
return -ENOTSUP;
}
num_rep = eth_da->nb_representor_ports;
- if (num_rep > BNXT_MAX_VF_REPS) {
+ if (num_rep > max_vf_reps) {
PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
- num_rep, BNXT_MAX_VF_REPS);
+ num_rep, max_vf_reps);
return -EINVAL;
}
return -EINVAL;
}
- backing_bp = backing_eth_dev->data->dev_private;
-
if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
PMD_DRV_LOG(ERR,
"Not a PF or trusted VF. No Representor support\n");
.parent_dev = backing_eth_dev
};
- if (representor.vf_id >= BNXT_MAX_VF_REPS) {
+ if (representor.vf_id >= max_vf_reps) {
PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
- representor.vf_id, BNXT_MAX_VF_REPS);
+ representor.vf_id, max_vf_reps);
continue;
}
RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
-
+RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");