X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_ethdev.c;h=181de42d15dcffd0dae73090a5dea0ac045d8f86;hb=0a90c56eacd5e345b42c7f152d33ca4c39343a52;hp=f3cd756447cfe273c56374fab7b18415ddba1f3e;hpb=f7e04f57ad61c8c343502f108c141c359d3d3db6;p=dpdk.git diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index f3cd756447..181de42d15 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -87,7 +87,6 @@ static const struct rte_pci_id bnxt_pci_id_map[] = { { .vendor_id = 0, /* sentinel */ }, }; -#define BNXT_DEVARG_ACCUM_STATS "accum-stats" #define BNXT_DEVARG_FLOW_XSTAT "flow-xstat" #define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows" #define BNXT_DEVARG_REPRESENTOR "representor" @@ -101,7 +100,6 @@ static const struct rte_pci_id bnxt_pci_id_map[] = { static const char *const bnxt_dev_args[] = { BNXT_DEVARG_REPRESENTOR, - BNXT_DEVARG_ACCUM_STATS, BNXT_DEVARG_FLOW_XSTAT, BNXT_DEVARG_MAX_NUM_KFLOWS, BNXT_DEVARG_REP_BASED_PF, @@ -114,12 +112,6 @@ static const char *const bnxt_dev_args[] = { NULL }; -/* - * accum-stats == false to disable flow counter accumulation - * accum-stats == true to enable flow counter accumulation - */ -#define BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats) ((accum_stats) > 1) - /* * app-id = an non-negative 8-bit number */ @@ -376,7 +368,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) if (rc) goto alloc_mem_err; - rc = bnxt_alloc_vnic_attributes(bp); + rc = bnxt_alloc_vnic_attributes(bp, reconfig); if (rc) goto alloc_mem_err; @@ -426,7 +418,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) goto err_out; /* Alloc RSS context only if RSS mode is enabled */ - if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) { + if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) { int j, nr_ctxs = bnxt_rss_ctxts(bp); /* RSS table size in Thor is 512. @@ -458,7 +450,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) * setting is not available at this time, it will not be * configured correctly in the CFA. */ - if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) vnic->vlan_strip = true; else vnic->vlan_strip = false; @@ -493,7 +485,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, - (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ? + (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ? true : false); if (rc) goto err_out; @@ -729,22 +721,17 @@ error: static int bnxt_start_nic(struct bnxt *bp) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; uint32_t intr_vector = 0; uint32_t queue_id, base = BNXT_MISC_VEC_ID; uint32_t vec = BNXT_MISC_VEC_ID; unsigned int i, j; int rc; - if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) { - bp->eth_dev->data->dev_conf.rxmode.offloads |= - DEV_RX_OFFLOAD_JUMBO_FRAME; + if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) bp->flags |= BNXT_FLAG_JUMBO; - } else { - bp->eth_dev->data->dev_conf.rxmode.offloads &= - ~DEV_RX_OFFLOAD_JUMBO_FRAME; + else bp->flags &= ~BNXT_FLAG_JUMBO; - } /* THOR does not support ring groups. * But we will use the array to save RSS context IDs. 
@@ -752,12 +739,6 @@ static int bnxt_start_nic(struct bnxt *bp) if (BNXT_CHIP_P5(bp)) bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5; - rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); - if (rc) { - PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc); - goto err_out; - } - rc = bnxt_alloc_hwrm_rings(bp); if (rc) { PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); @@ -795,19 +776,23 @@ skip_cosq_cfg: goto err_out; } - /* default vnic 0 */ - rc = bnxt_setup_one_vnic(bp, 0); - if (rc) - goto err_out; - /* VNIC configuration */ - if (BNXT_RFS_NEEDS_VNIC(bp)) { - for (i = 1; i < bp->nr_vnics; i++) { - rc = bnxt_setup_one_vnic(bp, i); - if (rc) - goto err_out; + for (j = 0; j < bp->rx_nr_rings; j++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[j]; + + if (!rxq->rx_deferred_start) { + bp->eth_dev->data->rx_queue_state[j] = + RTE_ETH_QUEUE_STATE_STARTED; + rxq->rx_started = true; } } + /* VNIC configuration */ + for (i = 0; i < bp->nr_vnics; i++) { + rc = bnxt_setup_one_vnic(bp, i); + if (rc) + goto err_out; + } + for (j = 0; j < bp->tx_nr_rings; j++) { struct bnxt_tx_queue *txq = bp->tx_queues[j]; @@ -818,16 +803,6 @@ skip_cosq_cfg: } } - for (j = 0; j < bp->rx_nr_rings; j++) { - struct bnxt_rx_queue *rxq = bp->rx_queues[j]; - - if (!rxq->rx_deferred_start) { - bp->eth_dev->data->rx_queue_state[j] = - RTE_ETH_QUEUE_STATE_STARTED; - rxq->rx_started = true; - } - } - rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); if (rc) { PMD_DRV_LOG(ERR, @@ -851,26 +826,24 @@ skip_cosq_cfg: return rc; } - if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { - intr_handle->intr_vec = - rte_zmalloc("intr_vec", - bp->eth_dev->data->nb_rx_queues * - sizeof(int), 0); - if (intr_handle->intr_vec == NULL) { + if (rte_intr_dp_is_en(intr_handle)) { + if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", + bp->eth_dev->data->nb_rx_queues)) { PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" " intr_vec", bp->eth_dev->data->nb_rx_queues); rc = -ENOMEM; goto err_out; } - PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p " - "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", - intr_handle->intr_vec, intr_handle->nb_efd, - intr_handle->max_intr); + PMD_DRV_LOG(DEBUG, "intr_handle->nb_efd = %d " + "intr_handle->max_intr = %d\n", + rte_intr_nb_efd_get(intr_handle), + rte_intr_max_intr_get(intr_handle)); for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; queue_id++) { - intr_handle->intr_vec[queue_id] = - vec + BNXT_RX_VEC_START; - if (vec < base + intr_handle->nb_efd - 1) + rte_intr_vec_list_index_set(intr_handle, + queue_id, vec + BNXT_RX_VEC_START); + if (vec < base + rte_intr_nb_efd_get(intr_handle) + - 1) vec++; } } @@ -928,35 +901,35 @@ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) link_speed = bp->link_info->support_pam4_speeds; if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) - speed_capa |= ETH_LINK_SPEED_100M; + speed_capa |= RTE_ETH_LINK_SPEED_100M; if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) - speed_capa |= ETH_LINK_SPEED_100M_HD; + speed_capa |= RTE_ETH_LINK_SPEED_100M_HD; if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) - speed_capa |= ETH_LINK_SPEED_1G; + speed_capa |= RTE_ETH_LINK_SPEED_1G; if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) - speed_capa |= ETH_LINK_SPEED_2_5G; + speed_capa |= RTE_ETH_LINK_SPEED_2_5G; if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) - speed_capa |= ETH_LINK_SPEED_10G; + speed_capa |= RTE_ETH_LINK_SPEED_10G; if (link_speed & 
HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
-		speed_capa |= ETH_LINK_SPEED_20G;
+		speed_capa |= RTE_ETH_LINK_SPEED_20G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
-		speed_capa |= ETH_LINK_SPEED_25G;
+		speed_capa |= RTE_ETH_LINK_SPEED_25G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
-		speed_capa |= ETH_LINK_SPEED_40G;
+		speed_capa |= RTE_ETH_LINK_SPEED_40G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
-		speed_capa |= ETH_LINK_SPEED_50G;
+		speed_capa |= RTE_ETH_LINK_SPEED_50G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
-		speed_capa |= ETH_LINK_SPEED_100G;
+		speed_capa |= RTE_ETH_LINK_SPEED_100G;
 	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
-		speed_capa |= ETH_LINK_SPEED_200G;
+		speed_capa |= RTE_ETH_LINK_SPEED_200G;
 
 	if (bp->link_info->auto_mode ==
 	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
-		speed_capa |= ETH_LINK_SPEED_FIXED;
+		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
 
 	return speed_capa;
 }
@@ -975,7 +948,7 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 		return rc;
 
 	/* MAC Specifics */
-	dev_info->max_mac_addrs = bp->max_l2_ctx;
+	dev_info->max_mac_addrs = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR);
 	dev_info->max_hash_mac_addrs = 0;
 
 	/* PF/VF specifics */
@@ -998,21 +971,16 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	dev_info->min_rx_bufsize = 1;
 	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
 
-	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
-	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
-	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_STRIP;
-	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
-	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
+	dev_info->rx_offload_capa = bnxt_get_rx_port_offloads(bp);
+	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+	dev_info->tx_offload_capa = bnxt_get_tx_port_offloads(bp) |
 				    dev_info->tx_queue_offload_capa;
-	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
 	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -1054,8 +1022,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	 */
 
 	/* VMDq resources */
-	vpool = 64; /* ETH_64_POOLS */
-	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+	vpool = 64; /* RTE_ETH_64_POOLS */
+	vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
 	for (i = 0; i < 4; vpool >>= 1, i++) {
 		if (max_vnics > vpool) {
 			for (j = 0; j < 5; vrxq >>= 1, j++) {
@@ -1087,6 +1055,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 {
 	struct bnxt *bp = eth_dev->data->dev_private;
 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+	struct rte_eth_rss_conf *rss_conf = &eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
 	int rc;
 
 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
@@ -1150,17 +1119,28 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 		    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
 		goto resource_error;
 
-	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
 	    bp->max_vnics < eth_dev->data->nb_rx_queues)
 		goto resource_error;
 
 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
 
-	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
 
+	/* application provides the hash key to program */
+	if (rss_conf->rss_key != NULL) {
+		if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE)
+			PMD_DRV_LOG(WARNING, "port %u RSS key len must be %d bytes long",
+				    eth_dev->data->port_id, HW_HASH_KEY_SIZE);
+		else
+			memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE);
+	}
+	bp->rss_conf.rss_key_len = HW_HASH_KEY_SIZE;
+	bp->rss_conf.rss_hf = rss_conf->rss_hf;
+
 	bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
 
 	return 0;
@@ -1184,10 +1164,10 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
 	struct rte_eth_link *link = &eth_dev->data->dev_link;
 
 	if (link->link_status)
-		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
+		PMD_DRV_LOG(DEBUG, "Port %d Link Up - speed %u Mbps - %s\n",
 			eth_dev->data->port_id,
 			(uint32_t)link->link_speed,
-			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+			(link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
 			("full-duplex") : ("half-duplex\n"));
 	else
 		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
@@ -1204,10 +1184,10 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
 	uint16_t buf_size;
 	int i;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
 		return 1;
 
-	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
+	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		return 1;
 
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -1252,16 +1232,15 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
 	 * a limited subset have been enabled.
 	 */
 	if (eth_dev->data->dev_conf.rxmode.offloads &
-	    ~(DEV_RX_OFFLOAD_VLAN_STRIP |
-	      DEV_RX_OFFLOAD_KEEP_CRC |
-	      DEV_RX_OFFLOAD_JUMBO_FRAME |
-	      DEV_RX_OFFLOAD_IPV4_CKSUM |
-	      DEV_RX_OFFLOAD_UDP_CKSUM |
-	      DEV_RX_OFFLOAD_TCP_CKSUM |
-	      DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-	      DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-	      DEV_RX_OFFLOAD_RSS_HASH |
-	      DEV_RX_OFFLOAD_VLAN_FILTER))
+	    ~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+	      RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+	      RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+	      RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+	      RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+	      RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+	      RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+	      RTE_ETH_RX_OFFLOAD_RSS_HASH |
+	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER))
 		goto use_scalar_rx;
 
 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
@@ -1313,7 +1292,7 @@ bnxt_transmit_function(struct rte_eth_dev *eth_dev)
 	 * or tx offloads.
*/ if (eth_dev->data->scattered_rx || - (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) || + (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) || BNXT_TRUFLOW_EN(bp)) goto use_scalar_tx; @@ -1479,16 +1458,14 @@ static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct rte_eth_link link; int ret; eth_dev->data->dev_started = 0; - eth_dev->data->scattered_rx = 0; /* Prevent crashes when queues are still in use */ - eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts; - eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts; + bnxt_stop_rxtx(eth_dev); bnxt_disable_int(bp); @@ -1521,10 +1498,7 @@ static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) /* Clean queue intr-vector mapping */ rte_intr_efd_disable(intr_handle); - if (intr_handle->intr_vec != NULL) { - rte_free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; - } + rte_intr_vec_list_free(intr_handle); bnxt_hwrm_port_clr_stats(bp); bnxt_free_tx_mbufs(bp); @@ -1544,11 +1518,13 @@ static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) if (BNXT_FLOW_XSTATS_EN(bp)) bp->flow_stat->flow_count = 0; + eth_dev->data->scattered_rx = 0; + return 0; } /* Unload the driver, release resources */ -static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) +int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; @@ -1564,7 +1540,7 @@ static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) return bnxt_dev_stop(eth_dev); } -static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) +int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; @@ -1614,10 +1590,10 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) bnxt_link_update_op(eth_dev, 1); - if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) - vlan_mask |= ETH_VLAN_FILTER_MASK; - if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) - vlan_mask |= ETH_VLAN_STRIP_MASK; + if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) + vlan_mask |= RTE_ETH_VLAN_FILTER_MASK; + if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) + vlan_mask |= RTE_ETH_VLAN_STRIP_MASK; rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); if (rc) goto error; @@ -1696,6 +1672,7 @@ static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); bnxt_cancel_fc_thread(bp); + rte_eal_alarm_cancel(bnxt_handle_vf_cfg_change, (void *)bp); if (eth_dev->data->dev_started) ret = bnxt_dev_stop(eth_dev); @@ -1839,8 +1816,8 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) /* Retrieve link info from hardware */ rc = bnxt_get_hwrm_link_config(bp, &new); if (rc) { - new.link_speed = ETH_LINK_SPEED_100M; - new.link_duplex = ETH_LINK_FULL_DUPLEX; + new.link_speed = RTE_ETH_LINK_SPEED_100M; + new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; PMD_DRV_LOG(ERR, "Failed to retrieve link rc = 0x%x!\n", rc); goto out; @@ -2034,7 +2011,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, if (!vnic->rss_table) return -EINVAL; - if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) + if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) return -EINVAL; if (reta_size != tbl_size) { @@ -2047,8 +2024,8 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, for (i 
= 0; i < reta_size; i++) {
 		struct bnxt_rx_queue *rxq;
 
-		idx = i / RTE_RETA_GROUP_SIZE;
-		sft = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		sft = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (!(reta_conf[idx].mask & (1ULL << sft)))
 			continue;
@@ -2101,8 +2078,8 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 	}
 
 	for (idx = 0, i = 0; i < reta_size; i++) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		sft = i % RTE_RETA_GROUP_SIZE;
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		sft = i % RTE_ETH_RETA_GROUP_SIZE;
 
 		if (reta_conf[idx].mask & (1ULL << sft)) {
 			uint16_t qid;
@@ -2140,7 +2117,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	 * If RSS enablement were different than dev_configure,
 	 * then return -EINVAL
 	 */
-	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
 		if (!rss_conf->rss_hf)
 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
 	} else {
@@ -2148,17 +2125,15 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 			return -EINVAL;
 	}
 
-	bp->flags |= BNXT_FLAG_UPDATE_HASH;
-	memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf,
-	       rss_conf,
-	       sizeof(*rss_conf));
-
 	/* Update the default RSS VNIC(s) */
 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
 	vnic->hash_mode =
 		bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
-					    ETH_RSS_LEVEL(rss_conf->rss_hf));
+					    RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
+
+	/* Cache the hash function */
+	bp->rss_conf.rss_hf = rss_conf->rss_hf;
 
 	/*
 	 * If hashkey is not specified, use the previously configured
@@ -2175,6 +2150,9 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	}
 	memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
 
+	/* Cache the hash key */
+	memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE);
+
 rss_config:
 	rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
 	return rc;
@@ -2203,30 +2181,30 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
 	hash_types = vnic->hash_type;
 	rss_conf->rss_hf = 0;
 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
-		rss_conf->rss_hf |= ETH_RSS_IPV4;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
 		hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
 	}
 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 		hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
 	}
 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 		hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
 	}
 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
-		rss_conf->rss_hf |= ETH_RSS_IPV6;
+		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
 		hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
 	}
 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 		hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
 	}
 	if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
-		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 		hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 	}
 
@@ -2266,17 +2244,17 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
 		fc_conf->autoneg = 1;
 	switch (bp->link_info->pause) {
 	case 0:
-		fc_conf->mode = RTE_FC_NONE;
+		fc_conf->mode = RTE_ETH_FC_NONE;
 		break;
 	case 
HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: - fc_conf->mode = RTE_FC_TX_PAUSE; + fc_conf->mode = RTE_ETH_FC_TX_PAUSE; break; case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: - fc_conf->mode = RTE_FC_RX_PAUSE; + fc_conf->mode = RTE_ETH_FC_RX_PAUSE; break; case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): - fc_conf->mode = RTE_FC_FULL; + fc_conf->mode = RTE_ETH_FC_FULL; break; } return 0; @@ -2299,11 +2277,11 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, } switch (fc_conf->mode) { - case RTE_FC_NONE: + case RTE_ETH_FC_NONE: bp->link_info->auto_pause = 0; bp->link_info->force_pause = 0; break; - case RTE_FC_RX_PAUSE: + case RTE_ETH_FC_RX_PAUSE: if (fc_conf->autoneg) { bp->link_info->auto_pause = HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; @@ -2314,7 +2292,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; } break; - case RTE_FC_TX_PAUSE: + case RTE_ETH_FC_TX_PAUSE: if (fc_conf->autoneg) { bp->link_info->auto_pause = HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; @@ -2325,7 +2303,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; } break; - case RTE_FC_FULL: + case RTE_ETH_FC_FULL: if (fc_conf->autoneg) { bp->link_info->auto_pause = HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | @@ -2356,7 +2334,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, return rc; switch (udp_tunnel->prot_type) { - case RTE_TUNNEL_TYPE_VXLAN: + case RTE_ETH_TUNNEL_TYPE_VXLAN: if (bp->vxlan_port_cnt) { PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", udp_tunnel->udp_port); @@ -2370,7 +2348,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, tunnel_type = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; break; - case RTE_TUNNEL_TYPE_GENEVE: + case RTE_ETH_TUNNEL_TYPE_GENEVE: if (bp->geneve_port_cnt) { PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", udp_tunnel->udp_port); @@ -2419,7 +2397,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, return rc; switch (udp_tunnel->prot_type) { - case RTE_TUNNEL_TYPE_VXLAN: + case RTE_ETH_TUNNEL_TYPE_VXLAN: if (!bp->vxlan_port_cnt) { PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); return -EINVAL; @@ -2436,7 +2414,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; port = bp->vxlan_fw_dst_port_id; break; - case RTE_TUNNEL_TYPE_GENEVE: + case RTE_ETH_TUNNEL_TYPE_GENEVE: if (!bp->geneve_port_cnt) { PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); return -EINVAL; @@ -2614,7 +2592,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) int rc; vnic = BNXT_GET_DEFAULT_VNIC(bp); - if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { + if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { /* Remove any VLAN filters programmed */ for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) bnxt_del_vlan_filter(bp, i); @@ -2634,7 +2612,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) bnxt_add_vlan_filter(bp, 0); } PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", - !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); + !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)); return 0; } @@ -2647,7 +2625,7 @@ static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) /* Destroy vnic filters and vnic */ if (bp->eth_dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_FILTER) { + RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) bnxt_del_vlan_filter(bp, i); } @@ -2686,7 +2664,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, 
uint64_t rx_offloads) return rc; if (bp->eth_dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_FILTER) { + RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { rc = bnxt_add_vlan_filter(bp, 0); if (rc) return rc; @@ -2704,7 +2682,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) return rc; PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", - !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); + !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)); return rc; } @@ -2724,22 +2702,22 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) if (!dev->data->dev_started) return 0; - if (mask & ETH_VLAN_FILTER_MASK) { + if (mask & RTE_ETH_VLAN_FILTER_MASK) { /* Enable or disable VLAN filtering */ rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); if (rc) return rc; } - if (mask & ETH_VLAN_STRIP_MASK) { + if (mask & RTE_ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); if (rc) return rc; } - if (mask & ETH_VLAN_EXTEND_MASK) { - if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + if (mask & RTE_ETH_VLAN_EXTEND_MASK) { + if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); else PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); @@ -2754,10 +2732,10 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, { struct bnxt *bp = dev->data->dev_private; int qinq = dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_EXTEND; + RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; - if (vlan_type != ETH_VLAN_TYPE_INNER && - vlan_type != ETH_VLAN_TYPE_OUTER) { + if (vlan_type != RTE_ETH_VLAN_TYPE_INNER && + vlan_type != RTE_ETH_VLAN_TYPE_OUTER) { PMD_DRV_LOG(ERR, "Unsupported vlan type."); return -EINVAL; @@ -2769,7 +2747,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, return -EINVAL; } - if (vlan_type == ETH_VLAN_TYPE_OUTER) { + if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) { switch (tpid) { case RTE_ETHER_TYPE_QINQ: bp->outer_tpid_bd = @@ -2797,7 +2775,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, } bp->outer_tpid_bd |= tpid; PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); - } else if (vlan_type == ETH_VLAN_TYPE_INNER) { + } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) { PMD_DRV_LOG(ERR, "Can accelerate only outer vlan in QinQ\n"); return -EINVAL; @@ -2837,7 +2815,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, bnxt_del_dflt_mac_filter(bp, vnic); memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { + if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { /* This filter will allow only untagged packets */ rc = bnxt_add_vlan_filter(bp, 0); } else { @@ -2854,9 +2832,8 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, uint32_t nb_mc_addr) { struct bnxt *bp = eth_dev->data->dev_private; - char *mc_addr_list = (char *)mc_addr_set; struct bnxt_vnic_info *vnic; - uint32_t off = 0, i = 0; + uint32_t i = 0; int rc; rc = is_bnxt_in_error(bp); @@ -2865,6 +2842,8 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, vnic = BNXT_GET_DEFAULT_VNIC(bp); + bp->nb_mc_addr = nb_mc_addr; + if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; goto allmulti; @@ -2872,14 +2851,10 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, /* TODO Check for Duplicate mcast addresses */ vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; - for (i = 0; i < nb_mc_addr; i++) { - memcpy(vnic->mc_list + off, 
&mc_addr_list[i], - RTE_ETHER_ADDR_LEN); - off += RTE_ETHER_ADDR_LEN; - } + for (i = 0; i < nb_mc_addr; i++) + rte_ether_addr_copy(&mc_addr_set[i], &bp->mcast_addr_list[i]); - vnic->mc_addr_cnt = i; - if (vnic->mc_addr_cnt) + if (bp->nb_mc_addr) vnic->flags |= BNXT_VNIC_INFO_MCAST; else vnic->flags &= ~BNXT_VNIC_INFO_MCAST; @@ -4287,6 +4262,18 @@ static int bnxt_restore_mac_filters(struct bnxt *bp) return 0; } +static int bnxt_restore_mcast_mac_filters(struct bnxt *bp) +{ + int ret = 0; + + ret = bnxt_dev_set_mc_addr_list_op(bp->eth_dev, bp->mcast_addr_list, + bp->nb_mc_addr); + if (ret) + PMD_DRV_LOG(ERR, "Failed to restore multicast MAC addreeses\n"); + + return ret; +} + static int bnxt_restore_filters(struct bnxt *bp) { struct rte_eth_dev *dev = bp->eth_dev; @@ -4307,8 +4294,15 @@ static int bnxt_restore_filters(struct bnxt *bp) if (ret) return ret; + /* if vlans are already programmed, this can fail with -EEXIST */ ret = bnxt_restore_vlan_filters(bp); - /* TODO restore other filters as well */ + if (ret && ret != -EEXIST) + return ret; + + ret = bnxt_restore_mcast_mac_filters(bp); + if (ret) + return ret; + return ret; } @@ -4346,6 +4340,8 @@ static void bnxt_dev_recover(void *arg) /* Clear Error flag so that device re-init should happen */ bp->flags &= ~BNXT_FLAG_FATAL_ERROR; + PMD_DRV_LOG(INFO, "Port: %u Starting recovery...\n", + bp->eth_dev->data->port_id); rc = bnxt_check_fw_ready(bp); if (rc) @@ -4370,7 +4366,14 @@ static void bnxt_dev_recover(void *arg) if (rc) goto err_start; - PMD_DRV_LOG(INFO, "Recovered from FW reset\n"); + rte_eth_fp_ops[bp->eth_dev->data->port_id].rx_pkt_burst = + bp->eth_dev->rx_pkt_burst; + rte_eth_fp_ops[bp->eth_dev->data->port_id].tx_pkt_burst = + bp->eth_dev->tx_pkt_burst; + rte_mb(); + + PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n", + bp->eth_dev->data->port_id); pthread_mutex_unlock(&bp->err_recovery_lock); return; @@ -4384,7 +4387,8 @@ err: RTE_ETH_EVENT_INTR_RMV, NULL); pthread_mutex_unlock(&bp->err_recovery_lock); - PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); + PMD_DRV_LOG(ERR, "Port %u: Failed to recover from FW reset\n", + bp->eth_dev->data->port_id); } void bnxt_dev_reset_and_resume(void *arg) @@ -4395,6 +4399,8 @@ void bnxt_dev_reset_and_resume(void *arg) int rc; bnxt_dev_cleanup(bp); + PMD_DRV_LOG(INFO, "Port: %u Finished bnxt_dev_cleanup\n", + bp->eth_dev->data->port_id); bnxt_wait_for_device_shutdown(bp); @@ -4418,7 +4424,8 @@ void bnxt_dev_reset_and_resume(void *arg) rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp); if (rc) - PMD_DRV_LOG(ERR, "Error setting recovery alarm"); + PMD_DRV_LOG(ERR, "Port %u: Error setting recovery alarm", + bp->eth_dev->data->port_id); } uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) @@ -4542,7 +4549,7 @@ reset: bp->flags |= BNXT_FLAG_FATAL_ERROR; bp->flags |= BNXT_FLAG_FW_RESET; - bnxt_stop_rxtx(bp); + bnxt_stop_rxtx(bp->eth_dev); PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); @@ -4997,11 +5004,15 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp) static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; + size_t max_mac_addr = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR); int rc = 0; + if (bp->max_l2_ctx > RTE_ETH_NUM_RECEIVE_MAC_ADDR) + PMD_DRV_LOG(INFO, "Max number of MAC addrs supported is %d, but will be limited to %d\n", + bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR); + eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", - RTE_ETHER_ADDR_LEN * - bp->max_l2_ctx, + RTE_ETHER_ADDR_LEN * 
max_mac_addr,
 					       0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
@@ -5028,6 +5039,23 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
 	/* Copy the permanent MAC from the FUNC_QCAPS response */
 	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
 
+	/*
+	 * Allocate memory to hold multicast mac addresses added.
+	 * Used to restore them during reset recovery
+	 */
+	bp->mcast_addr_list = rte_zmalloc("bnxt_mcast_addr_tbl",
+					  sizeof(struct rte_ether_addr) *
+					  BNXT_MAX_MC_ADDRS, 0);
+	if (bp->mcast_addr_list == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate multicast addr table\n");
+		return -ENOMEM;
+	}
+	bp->mc_list_dma_addr = rte_malloc_virt2iova(bp->mcast_addr_list);
+	if (bp->mc_list_dma_addr == RTE_BAD_IOVA) {
+		PMD_DRV_LOG(ERR, "Fail to map mcast_addr_list to physical memory\n");
+		return -ENOMEM;
+	}
+
 	return rc;
 }
 
@@ -5062,209 +5090,6 @@ static void bnxt_config_vf_req_fwd(struct bnxt *bp)
 	BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
 }
 
-struct bnxt *
-bnxt_get_bp(uint16_t port)
-{
-	struct bnxt *bp;
-	struct rte_eth_dev *dev;
-
-	if (!rte_eth_dev_is_valid_port(port)) {
-		PMD_DRV_LOG(ERR, "Invalid port %d\n", port);
-		return NULL;
-	}
-
-	dev = &rte_eth_devices[port];
-	if (!is_bnxt_supported(dev)) {
-		PMD_DRV_LOG(ERR, "Device %d not supported\n", port);
-		return NULL;
-	}
-
-	bp = (struct bnxt *)dev->data->dev_private;
-	if (!BNXT_TRUFLOW_EN(bp)) {
-		PMD_DRV_LOG(ERR, "TRUFLOW not enabled\n");
-		return NULL;
-	}
-
-	return bp;
-}
-
-uint16_t
-bnxt_get_svif(uint16_t port_id, bool func_svif,
-	      enum bnxt_ulp_intf_type type)
-{
-	struct rte_eth_dev *eth_dev;
-	struct bnxt *bp;
-
-	eth_dev = &rte_eth_devices[port_id];
-	if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
-		struct bnxt_representor *vfr = eth_dev->data->dev_private;
-		if (!vfr)
-			return 0;
-
-		if (type == BNXT_ULP_INTF_TYPE_VF_REP)
-			return vfr->svif;
-
-		eth_dev = vfr->parent_dev;
-	}
-
-	bp = eth_dev->data->dev_private;
-
-	return func_svif ? 
bp->func_svif : bp->port_svif; -} - -void -bnxt_get_iface_mac(uint16_t port, enum bnxt_ulp_intf_type type, - uint8_t *mac, uint8_t *parent_mac) -{ - struct rte_eth_dev *eth_dev; - struct bnxt *bp; - - if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF && - type != BNXT_ULP_INTF_TYPE_PF) - return; - - eth_dev = &rte_eth_devices[port]; - bp = eth_dev->data->dev_private; - memcpy(mac, bp->mac_addr, RTE_ETHER_ADDR_LEN); - - if (type == BNXT_ULP_INTF_TYPE_TRUSTED_VF) - memcpy(parent_mac, bp->parent->mac_addr, RTE_ETHER_ADDR_LEN); -} - -uint16_t -bnxt_get_parent_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) -{ - struct rte_eth_dev *eth_dev; - struct bnxt *bp; - - if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) - return 0; - - eth_dev = &rte_eth_devices[port]; - bp = eth_dev->data->dev_private; - - return bp->parent->vnic; -} -uint16_t -bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) -{ - struct rte_eth_dev *eth_dev; - struct bnxt_vnic_info *vnic; - struct bnxt *bp; - - eth_dev = &rte_eth_devices[port]; - if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { - struct bnxt_representor *vfr = eth_dev->data->dev_private; - if (!vfr) - return 0; - - if (type == BNXT_ULP_INTF_TYPE_VF_REP) - return vfr->dflt_vnic_id; - - eth_dev = vfr->parent_dev; - } - - bp = eth_dev->data->dev_private; - - vnic = BNXT_GET_DEFAULT_VNIC(bp); - - return vnic->fw_vnic_id; -} - -uint16_t -bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type) -{ - struct rte_eth_dev *eth_dev; - struct bnxt *bp; - - eth_dev = &rte_eth_devices[port]; - if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { - struct bnxt_representor *vfr = eth_dev->data->dev_private; - if (!vfr) - return 0; - - if (type == BNXT_ULP_INTF_TYPE_VF_REP) - return vfr->fw_fid; - - eth_dev = vfr->parent_dev; - } - - bp = eth_dev->data->dev_private; - - return bp->fw_fid; -} - -enum bnxt_ulp_intf_type -bnxt_get_interface_type(uint16_t port) -{ - struct rte_eth_dev *eth_dev; - struct bnxt *bp; - - eth_dev = &rte_eth_devices[port]; - if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) - return BNXT_ULP_INTF_TYPE_VF_REP; - - bp = eth_dev->data->dev_private; - if (BNXT_PF(bp)) - return BNXT_ULP_INTF_TYPE_PF; - else if (BNXT_VF_IS_TRUSTED(bp)) - return BNXT_ULP_INTF_TYPE_TRUSTED_VF; - else if (BNXT_VF(bp)) - return BNXT_ULP_INTF_TYPE_VF; - - return BNXT_ULP_INTF_TYPE_INVALID; -} - -uint16_t -bnxt_get_phy_port_id(uint16_t port_id) -{ - struct bnxt_representor *vfr; - struct rte_eth_dev *eth_dev; - struct bnxt *bp; - - eth_dev = &rte_eth_devices[port_id]; - if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { - vfr = eth_dev->data->dev_private; - if (!vfr) - return 0; - - eth_dev = vfr->parent_dev; - } - - bp = eth_dev->data->dev_private; - - return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id; -} - -uint16_t -bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type) -{ - struct rte_eth_dev *eth_dev; - struct bnxt *bp; - - eth_dev = &rte_eth_devices[port_id]; - if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { - struct bnxt_representor *vfr = eth_dev->data->dev_private; - if (!vfr) - return 0; - - if (type == BNXT_ULP_INTF_TYPE_VF_REP) - return vfr->fw_fid - 1; - - eth_dev = vfr->parent_dev; - } - - bp = eth_dev->data->dev_private; - - return BNXT_PF(bp) ? 
bp->fw_fid - 1 : bp->parent->fid - 1; -} - -uint16_t -bnxt_get_vport(uint16_t port_id) -{ - return (1 << bnxt_get_phy_port_id(port_id)); -} - static void bnxt_alloc_error_recovery_info(struct bnxt *bp) { struct bnxt_error_recovery_info *info = bp->recovery_info; @@ -5393,10 +5218,6 @@ static int bnxt_get_config(struct bnxt *bp) if (rc) return rc; - rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp); - if (rc) - return rc; - bnxt_hwrm_port_mac_qcfg(bp); bnxt_hwrm_parent_pf_qcfg(bp); @@ -5487,6 +5308,16 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) } } + if (!reconfig_dev) { + bp->rss_conf.rss_key = rte_zmalloc("bnxt_rss_key", + HW_HASH_KEY_SIZE, 0); + if (bp->rss_conf.rss_key == NULL) { + PMD_DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory", + bp->eth_dev->data->port_id); + return -ENOMEM; + } + } + rc = bnxt_alloc_mem(bp, reconfig_dev); if (rc) return rc; @@ -5508,45 +5339,6 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) return 0; } -static int -bnxt_parse_devarg_accum_stats(__rte_unused const char *key, - const char *value, void *opaque_arg) -{ - struct bnxt *bp = opaque_arg; - unsigned long accum_stats; - char *end = NULL; - - if (!value || !opaque_arg) { - PMD_DRV_LOG(ERR, - "Invalid parameter passed to accum-stats devargs.\n"); - return -EINVAL; - } - - accum_stats = strtoul(value, &end, 10); - if (end == NULL || *end != '\0' || - (accum_stats == ULONG_MAX && errno == ERANGE)) { - PMD_DRV_LOG(ERR, - "Invalid parameter passed to accum-stats devargs.\n"); - return -EINVAL; - } - - if (BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats)) { - PMD_DRV_LOG(ERR, - "Invalid value passed to accum-stats devargs.\n"); - return -EINVAL; - } - - if (accum_stats) { - bp->flags2 |= BNXT_FLAGS2_ACCUM_STATS_EN; - PMD_DRV_LOG(INFO, "Host-based accum-stats feature enabled.\n"); - } else { - bp->flags2 &= ~BNXT_FLAGS2_ACCUM_STATS_EN; - PMD_DRV_LOG(INFO, "Host-based accum-stats feature disabled.\n"); - } - - return 0; -} - static int bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, const char *value, void *opaque_arg) @@ -5899,12 +5691,6 @@ bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) if (ret) goto err; - /* - * Handler for "accum-stats" devarg. - * Invoked as for ex: "-a 0000:00:0d.0,accum-stats=1" - */ - rte_kvargs_process(kvlist, BNXT_DEVARG_ACCUM_STATS, - bnxt_parse_devarg_accum_stats, bp); /* * Handler for "max_num_kflows" devarg. 
* Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32" @@ -6093,8 +5879,7 @@ static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) if (!ctx) return; - if (ctx->va) - rte_free(ctx->va); + rte_free(ctx->va); ctx->va = NULL; ctx->dma = RTE_BAD_IOVA; @@ -6176,6 +5961,10 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) if (!reconfig_dev) { bnxt_free_hwrm_resources(bp); bnxt_free_error_recovery_info(bp); + rte_free(bp->mcast_addr_list); + bp->mcast_addr_list = NULL; + rte_free(bp->rss_conf.rss_key); + bp->rss_conf.rss_key = NULL; } bnxt_uninit_ctx_mem(bp); @@ -6243,7 +6032,7 @@ static int bnxt_init_rep_info(struct bnxt *bp) return 0; bp->rep_info = rte_zmalloc("bnxt_rep_info", - sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS, + sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS(bp), 0); if (!bp->rep_info) { PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n"); @@ -6285,7 +6074,9 @@ static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev, { struct rte_eth_dev *vf_rep_eth_dev; char name[RTE_ETH_NAME_MAX_LEN]; - struct bnxt *backing_bp; + struct bnxt *backing_bp = backing_eth_dev->data->dev_private; + uint16_t max_vf_reps = BNXT_MAX_VF_REPS(backing_bp); + uint16_t num_rep; int i, ret = 0; struct rte_kvargs *kvlist = NULL; @@ -6298,9 +6089,9 @@ static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev, return -ENOTSUP; } num_rep = eth_da->nb_representor_ports; - if (num_rep > BNXT_MAX_VF_REPS) { + if (num_rep > max_vf_reps) { PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n", - num_rep, BNXT_MAX_VF_REPS); + num_rep, max_vf_reps); return -EINVAL; } @@ -6311,8 +6102,6 @@ static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev, return -EINVAL; } - backing_bp = backing_eth_dev->data->dev_private; - if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) { PMD_DRV_LOG(ERR, "Not a PF or trusted VF. No Representor support\n"); @@ -6332,9 +6121,9 @@ static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev, .parent_dev = backing_eth_dev }; - if (representor.vf_id >= BNXT_MAX_VF_REPS) { + if (representor.vf_id >= max_vf_reps) { PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n", - representor.vf_id, BNXT_MAX_VF_REPS); + representor.vf_id, max_vf_reps); continue; }