#define BROADCOM_DEV_ID_57508 0x1750
#define BROADCOM_DEV_ID_57504 0x1751
#define BROADCOM_DEV_ID_57502 0x1752
-#define BROADCOM_DEV_ID_57500_VF 0x1807
+#define BROADCOM_DEV_ID_57500_VF1 0x1806
+#define BROADCOM_DEV_ID_57500_VF2 0x1807
#define BROADCOM_DEV_ID_58802 0xd802
#define BROADCOM_DEV_ID_58804 0xd804
#define BROADCOM_DEV_ID_58808 0x16f0
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
- { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
{ .vendor_id = 0, /* sentinel */ },
};
* High level utility functions
*/
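+/*
+ * Number of firmware RSS contexts required by the port. Thor-based
+ * NICs split the RSS table across contexts of
+ * BNXT_RSS_ENTRIES_PER_CTX_THOR entries each; earlier chips use a
+ * single context. For example, assuming 64 entries per context, 100
+ * Rx rings need RTE_ALIGN_MUL_CEIL(100, 64) / 64 = 2 contexts.
+ */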
+static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
+{
+ if (!BNXT_CHIP_THOR(bp))
+ return 1;
+
+ return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
+ BNXT_RSS_ENTRIES_PER_CTX_THOR) /
+ BNXT_RSS_ENTRIES_PER_CTX_THOR;
+}
+
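+/*
+ * Size of the RSS indirection table: the fixed HW_HASH_INDEX_SIZE on
+ * pre-Thor chips, or one full set of entries per allocated RSS
+ * context on Thor.
+ */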
+static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
+{
+ if (!BNXT_CHIP_THOR(bp))
+ return HW_HASH_INDEX_SIZE;
+
+ return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
+}
+
static void bnxt_free_mem(struct bnxt *bp)
{
bnxt_free_filter_mem(bp);
bnxt_free_stats(bp);
bnxt_free_tx_rings(bp);
bnxt_free_rx_rings(bp);
+ bnxt_free_async_cp_ring(bp);
}
static int bnxt_alloc_mem(struct bnxt *bp)
{
int rc;
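+ /* Set up the ring structure for the completion ring dedicated to
+ * async events, so they are processed separately from Rx/Tx
+ * completions.
+ */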
+ rc = bnxt_alloc_async_ring_struct(bp);
+ if (rc)
+ goto alloc_mem_err;
+
rc = bnxt_alloc_vnic_mem(bp);
if (rc)
goto alloc_mem_err;
if (rc)
goto alloc_mem_err;
+ rc = bnxt_alloc_async_cp_ring(bp);
+ if (rc)
+ goto alloc_mem_err;
+
return 0;
alloc_mem_err:
unsigned int i, j;
int rc;
- /* disable uio/vfio intr/eventfd mapping */
- rte_intr_disable(intr_handle);
-
if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
bp->eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
/* Alloc RSS context only if RSS mode is enabled */
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
+ int j, nr_ctxs = bnxt_rss_ctxts(bp);
+
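+ /* Thor needs one firmware context per
+ * BNXT_RSS_ENTRIES_PER_CTX_THOR-entry chunk of the RSS table;
+ * bnxt_rss_ctxts() returns 1 for all other chips.
+ */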
+ rc = 0;
+ for (j = 0; j < nr_ctxs; j++) {
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
+ if (rc)
+ break;
+ }
if (rc) {
PMD_DRV_LOG(ERR,
- "HWRM vnic %d ctx alloc failure rc: %x\n",
- i, rc);
+ "HWRM vnic %d ctx %d alloc failure rc: %x\n",
+ i, j, rc);
goto err_out;
}
+ vnic->num_lb_ctxts = nr_ctxs;
}
/*
bp->rx_cp_nr_rings);
return -ENOTSUP;
}
- if (rte_intr_efd_enable(intr_handle, intr_vector))
- return -1;
+ rc = rte_intr_efd_enable(intr_handle, intr_vector);
+ if (rc)
+ return rc;
}
if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
if (intr_handle->intr_vec == NULL) {
PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
" intr_vec", bp->eth_dev->data->nb_rx_queues);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_disable;
}
PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
intr_handle->intr_vec, intr_handle->nb_efd,
intr_handle->max_intr);
- }
-
- for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
- queue_id++) {
- intr_handle->intr_vec[queue_id] = vec;
- if (vec < base + intr_handle->nb_efd - 1)
- vec++;
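+ /* Assign each Rx queue an eventfd vector; BNXT_RX_VEC_START is
+ * assumed to offset past the vector reserved for non-Rx (async)
+ * events.
+ */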
+ for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
+ queue_id++) {
+ intr_handle->intr_vec[queue_id] =
+ vec + BNXT_RX_VEC_START;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
}
/* enable uio/vfio intr/eventfd mapping */
- rte_intr_enable(intr_handle);
+ rc = rte_intr_enable(intr_handle);
+ if (rc)
+ goto err_free;
rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
- goto err_out;
+ goto err_free;
}
if (!bp->link_info.link_up) {
if (rc) {
PMD_DRV_LOG(ERR,
"HWRM link config failure rc: %x\n", rc);
- goto err_out;
+ goto err_free;
}
}
bnxt_print_link_info(bp->eth_dev);
return 0;
+err_free:
+ rte_free(intr_handle->intr_vec);
+err_disable:
+ rte_intr_efd_disable(intr_handle);
err_out:
- bnxt_free_all_hwrm_resources(bp);
-
/* Some of the error status returned by FW may not be from errno.h */
if (rc > 0)
rc = -EIO;
{
int rc;
- rc = bnxt_init_ring_grps(bp);
- if (rc)
- return rc;
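+ /* Ring groups do not exist on Thor-based controllers; initialize
+ * them only on chips that support them.
+ */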
+ if (BNXT_HAS_RING_GRPS(bp)) {
+ rc = bnxt_init_ring_grps(bp);
+ if (rc)
+ return rc;
+ }
bnxt_init_vnics(bp);
bnxt_init_filters(bp);
* Device configuration and status function
*/
-static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
- struct rte_eth_dev_info *dev_info)
+static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *dev_info)
{
struct bnxt *bp = eth_dev->data->dev_private;
uint16_t max_vnics, i, j, vpool, vrxq;
/* PF/VF specifics */
if (BNXT_PF(bp))
dev_info->max_vfs = bp->pdev->max_vfs;
- max_rx_rings = RTE_MIN(bp->max_vnics, bp->max_stat_ctx);
+ max_rx_rings = RTE_MIN(bp->max_rx_rings, bp->max_stat_ctx);
/* For the sake of symmetry, max_rx_queues = max_tx_queues */
dev_info->max_rx_queues = max_rx_rings;
dev_info->max_tx_queues = max_rx_rings;
- dev_info->reta_size = HW_HASH_INDEX_SIZE;
+ dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
dev_info->hash_key_size = 40;
max_vnics = bp->max_vnics;
dev_info->vmdq_pool_base = 0;
dev_info->vmdq_queue_base = 0;
+
+ return 0;
}
/* Configure the device based on the configuration provided */
/* Inherit new configurations */
if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
- eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
- bp->max_cp_rings ||
+ eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
+ + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
bp->max_stat_ctx)
goto resource_error;
{
#ifdef RTE_ARCH_X86
/*
- * Vector mode receive can be enabled only if scatter tx is not
- * in use and tx offloads other than VLAN insertion are not
- * in use.
+	 * Vector mode transmit can be enabled only if scattered Rx is not
+	 * in use and no Tx offloads are in use.
*/
if (!eth_dev->data->scattered_rx &&
- !(eth_dev->data->dev_conf.txmode.offloads &
- ~DEV_TX_OFFLOAD_VLAN_INSERT)) {
+ !eth_dev->data->dev_conf.txmode.offloads) {
PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
eth_dev->data->port_id);
return bnxt_xmit_pkts_vec;
"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
}
- bp->dev_stopped = 0;
rc = bnxt_init_chip(bp);
if (rc)
eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
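+ /* Enable completion ring interrupts only after the chip is fully
+ * initialized.
+ */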
+ bnxt_enable_int(bp);
bp->flags |= BNXT_FLAG_INIT_DONE;
+ bp->dev_stopped = 0;
return 0;
error:
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ bnxt_disable_int(bp);
+
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
bp->flags &= ~BNXT_FLAG_INIT_DONE;
if (bp->eth_dev->data->dev_started) {
eth_dev->data->dev_link.link_status = 0;
}
bnxt_set_hwrm_link_config(bp, false);
+
+ /* Clean queue intr-vector mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+
bnxt_hwrm_port_clr_stats(bp);
bnxt_free_tx_mbufs(bp);
bnxt_free_rx_mbufs(bp);
struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
struct bnxt_filter_info *filter;
+ int rc = 0;
- if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp)) {
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
return -ENODEV;
}
- STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+
filter->mac_index = index;
memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
- return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
+
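+ /* Program the filter in hardware first; link it into the VNIC's
+ * filter list only if the HWRM call succeeds, otherwise release it.
+ */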
+ rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
+ if (!rc) {
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+ } else {
+ filter->mac_index = INVALID_MAC_INDEX;
+ memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
+ bnxt_free_filter(bp, filter);
+ }
+
+ return rc;
}
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
}
- rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
- if (!wait_to_complete)
+ if (!wait_to_complete || new.link_status)
break;
- } while (!new.link_status && cnt--);
+
+ rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
+ } while (cnt--);
out:
/* Timed out or success */
/* Return rxq corresponding to a given rss table ring/group ID. */
static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
{
+ struct bnxt_rx_queue *rxq;
unsigned int i;
- for (i = 0; i < bp->rx_nr_rings; i++) {
- if (bp->grp_info[i].fw_grp_id == fwr)
- return i;
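+ /* Thor has no ring groups: match against each Rx ring's firmware
+ * ring ID. Older chips translate through the ring group table.
+ */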
+ if (!BNXT_HAS_RING_GRPS(bp)) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ rxq = bp->eth_dev->data->rx_queues[i];
+ if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
+ return rxq->index;
+ }
+ } else {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ if (bp->grp_info[i].fw_grp_id == fwr)
+ return i;
+ }
}
return INVALID_HW_RING_ID;
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- uint16_t tbl_size = HW_HASH_INDEX_SIZE;
+ uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
uint16_t idx, sft;
int i;
return -EINVAL;
}
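+ /* Thor RSS table entries are (Rx ring ID, completion ring ID)
+ * pairs, hence the i * 2 indexing; other chips store ring group
+ * IDs.
+ */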
+ if (BNXT_CHIP_THOR(bp)) {
+ vnic->rss_table[i * 2] =
+ rxq->rx_ring->rx_ring_struct->fw_ring_id;
+ vnic->rss_table[i * 2 + 1] =
+ rxq->cp_ring->cp_ring_struct->fw_ring_id;
+ } else {
+ vnic->rss_table[i] =
+ vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
+ }
+
-		vnic->rss_table[i] =
-			vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
}
{
struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- uint16_t tbl_size = HW_HASH_INDEX_SIZE;
+ uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
uint16_t idx, sft, i;
/* Retrieve from the default VNIC */
if (reta_conf[idx].mask & (1ULL << sft)) {
uint16_t qid;
- qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
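+ /* On Thor the table holds ring ID pairs; the Rx ring ID sits at
+ * the even index.
+ */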
+ if (BNXT_CHIP_THOR(bp))
+ qid = bnxt_rss_to_qid(bp,
+ vnic->rss_table[i * 2]);
+ else
+ qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
if (qid == INVALID_HW_RING_ID) {
PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
- struct bnxt_filter_info *filter, *temp_filter, *new_filter;
+ struct bnxt_filter_info *filter;
struct bnxt_vnic_info *vnic;
- unsigned int i;
int rc = 0;
- uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
-
- /* Cycle through all VNICs */
- for (i = 0; i < bp->nr_vnics; i++) {
- /*
- * For each VNIC and each associated filter(s)
- * if VLAN exists && VLAN matches vlan_id
- * remove the MAC+VLAN filter
- * add a new MAC only filter
- * else
- * VLAN filter doesn't exist, just skip and continue
- */
- vnic = &bp->vnic_info[i];
- filter = STAILQ_FIRST(&vnic->filter);
- while (filter) {
- temp_filter = STAILQ_NEXT(filter, next);
+ uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
- if (filter->enables & chk &&
- filter->l2_ovlan == vlan_id) {
- /* Must delete the filter */
- STAILQ_REMOVE(&vnic->filter, filter,
- bnxt_filter_info, next);
- bnxt_hwrm_clear_l2_filter(bp, filter);
- STAILQ_INSERT_TAIL(&bp->free_filter_list,
- filter, next);
+	/* if VLAN exists && VLAN matches vlan_id
+	 *      remove the MAC+VLAN filter
+	 * else
+	 *      VLAN filter doesn't exist, just skip and continue
+	 */
+ vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ /* Search for this matching MAC+VLAN filter */
+ if (filter->enables & chk && filter->l2_ivlan == vlan_id &&
+ !memcmp(filter->l2_addr,
+ bp->mac_addr,
+ RTE_ETHER_ADDR_LEN)) {
+ /* Delete the filter */
+ rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+ if (rc)
+ return rc;
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
- /*
- * Need to examine to see if the MAC
- * filter already existed or not before
- * allocating a new one
- */
-
- new_filter = bnxt_alloc_filter(bp);
- if (!new_filter) {
- PMD_DRV_LOG(ERR,
- "MAC/VLAN filter alloc failed\n");
- rc = -ENOMEM;
- goto exit;
- }
- STAILQ_INSERT_TAIL(&vnic->filter,
- new_filter, next);
- /* Inherit MAC from previous filter */
- new_filter->mac_index =
- filter->mac_index;
- memcpy(new_filter->l2_addr, filter->l2_addr,
- RTE_ETHER_ADDR_LEN);
- /* MAC only filter */
- rc = bnxt_hwrm_set_l2_filter(bp,
- vnic->fw_vnic_id,
- new_filter);
- if (rc)
- goto exit;
- PMD_DRV_LOG(INFO,
- "Del Vlan filter for %d\n",
- vlan_id);
- }
- filter = temp_filter;
+ PMD_DRV_LOG(INFO,
+ "Del Vlan filter for %d\n",
+ vlan_id);
+ return rc;
}
+ filter = STAILQ_NEXT(filter, next);
}
-exit:
- return rc;
+ return -ENOENT;
}
static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
- struct bnxt_filter_info *filter, *temp_filter, *new_filter;
+ struct bnxt_filter_info *filter;
struct bnxt_vnic_info *vnic;
- unsigned int i;
int rc = 0;
uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
- /* Cycle through all VNICs */
- for (i = 0; i < bp->nr_vnics; i++) {
- /*
- * For each VNIC and each associated filter(s)
- * if VLAN exists:
- * if VLAN matches vlan_id
- * VLAN filter already exists, just skip and continue
- * else
- * add a new MAC+VLAN filter
- * else
- * Remove the old MAC only filter
- * Add a new MAC+VLAN filter
- */
- vnic = &bp->vnic_info[i];
- filter = STAILQ_FIRST(&vnic->filter);
- while (filter) {
- temp_filter = STAILQ_NEXT(filter, next);
+	/* Implementation notes on the use of VNIC in this command:
+	 *
+	 * By default, these filters belong to the default VNIC for the
+	 * function. Once these filters are set up, only the destination VNIC
+	 * can be modified. If the destination VNIC is not specified in this
+	 * command, then the HWRM shall only create an l2 context id.
+	 */
- if (filter->enables & chk) {
- if (filter->l2_ivlan == vlan_id)
- goto cont;
- } else {
- /* Must delete the MAC filter */
- STAILQ_REMOVE(&vnic->filter, filter,
- bnxt_filter_info, next);
- bnxt_hwrm_clear_l2_filter(bp, filter);
- filter->l2_ovlan = 0;
- STAILQ_INSERT_TAIL(&bp->free_filter_list,
- filter, next);
- }
- new_filter = bnxt_alloc_filter(bp);
- if (!new_filter) {
- PMD_DRV_LOG(ERR,
- "MAC/VLAN filter alloc failed\n");
- rc = -ENOMEM;
- goto exit;
- }
- STAILQ_INSERT_TAIL(&vnic->filter, new_filter, next);
- /* Inherit MAC from the previous filter */
- new_filter->mac_index = filter->mac_index;
- memcpy(new_filter->l2_addr, filter->l2_addr,
- RTE_ETHER_ADDR_LEN);
- /* MAC + VLAN ID filter */
- new_filter->l2_ivlan = vlan_id;
- new_filter->l2_ivlan_mask = 0xF000;
- new_filter->enables |= en;
- rc = bnxt_hwrm_set_l2_filter(bp,
- vnic->fw_vnic_id,
- new_filter);
- if (rc)
- goto exit;
- PMD_DRV_LOG(INFO,
- "Added Vlan filter for %d\n", vlan_id);
-cont:
- filter = temp_filter;
- }
+ vnic = BNXT_GET_DEFAULT_VNIC(bp);
+ filter = STAILQ_FIRST(&vnic->filter);
+ /* Check if the VLAN has already been added */
+ while (filter) {
+ if (filter->enables & chk && filter->l2_ivlan == vlan_id &&
+ !memcmp(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN))
+ return -EEXIST;
+
+ filter = STAILQ_NEXT(filter, next);
}
-exit:
+
+	/* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC
+	 * command to create a MAC+VLAN filter with the right flags and
+	 * enables set.
+	 */
+ filter = bnxt_alloc_filter(bp);
+ if (!filter) {
+ PMD_DRV_LOG(ERR,
+ "MAC/VLAN filter alloc failed\n");
+ return -ENOMEM;
+ }
+ /* MAC + VLAN ID filter */
+ filter->l2_ivlan = vlan_id;
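+ /* 0x0FFF masks the full 12-bit VLAN ID for an exact match; the
+ * old 0xF000 value missed the VLAN ID bits entirely.
+ */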
+ filter->l2_ivlan_mask = 0x0FFF;
+ filter->enables |= en;
+ rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
+ if (rc) {
+ /* Free the newly allocated filter as we were
+ * not able to create the filter in hardware.
+ */
+ filter->fw_l2_filter_id = UINT64_MAX;
+ STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
+ return rc;
+ }
+
+ /* Add this new filter to the list */
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+ PMD_DRV_LOG(INFO,
+ "Added Vlan filter for %d\n", vlan_id);
return rc;
}
if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
return -EPERM;
- memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
+ if (rte_is_zero_ether_addr(addr))
+ return -EINVAL;
STAILQ_FOREACH(filter, &vnic->filter, next) {
/* Default Filter is at Index 0 */
if (filter->mac_index != 0)
continue;
- rc = bnxt_hwrm_clear_l2_filter(bp, filter);
- if (rc)
- return rc;
+
-		memcpy(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
+		memcpy(filter->l2_addr, addr, RTE_ETHER_ADDR_LEN);
memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
filter->enables |=
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
+
rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
if (rc)
return rc;
- filter->mac_index = 0;
+
+ memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
+ return 0;
}
return 0;
new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
VLAN_TAG_SIZE * BNXT_NUM_VLANS;
- bnxt_dev_info_get_op(eth_dev, &dev_info);
+ rc = bnxt_dev_info_get_op(eth_dev, &dev_info);
+ if (rc != 0) {
+ PMD_DRV_LOG(ERR, "Error during getting ethernet device info\n");
+ return rc;
+ }
if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
if (filter1 == NULL) {
- ret = -1;
+ ret = -EINVAL;
goto cleanup;
}
bfilter->enables |=
vnic0 = &bp->vnic_info[0];
filter1 = STAILQ_FIRST(&vnic0->filter);
if (filter1 == NULL) {
- ret = -1;
+ ret = -EINVAL;
goto free_filter;
}
return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
in_eeprom->data, in_eeprom->length);
- return 0;
}
/*
id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||
id == BROADCOM_DEV_ID_58802_VF ||
- id == BROADCOM_DEV_ID_57500_VF)
+ id == BROADCOM_DEV_ID_57500_VF1 ||
+ id == BROADCOM_DEV_ID_57500_VF2)
return true;
return false;
}
static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = eth_dev->data->dev_private;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- int rc;
+ struct bnxt *bp = eth_dev->data->dev_private;
/* enable device (incl. PCI PM wakeup), and bus-mastering */
- if (!pci_dev->mem_resource[0].addr) {
- PMD_DRV_LOG(ERR,
- "Cannot find PCI device base address, aborting\n");
- rc = -ENODEV;
- goto init_err_disable;
+ bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
+ bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
+ if (!bp->bar0 || !bp->doorbell_base) {
+ PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
+ return -ENODEV;
}
bp->eth_dev = eth_dev;
bp->pdev = pci_dev;
- bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
- if (!bp->bar0) {
- PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
- rc = -ENOMEM;
- goto init_err_release;
- }
-
- if (!pci_dev->mem_resource[2].addr) {
- PMD_DRV_LOG(ERR,
- "Cannot find PCI device BAR 2 address, aborting\n");
- rc = -ENODEV;
- goto init_err_release;
- } else {
- bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
- }
-
return 0;
-
-init_err_release:
- if (bp->bar0)
- bp->bar0 = NULL;
- if (bp->doorbell_base)
- bp->doorbell_base = NULL;
-
-init_err_disable:
-
- return rc;
}
static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp,
valid_bits = PTU_PTE_VALID;
if (rmem->nr_pages > 1) {
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_tbl%s_%x",
- suffix, idx);
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_ctx_pg_tbl%s_%x_%d",
+ suffix, idx, bp->eth_dev->data->port_id);
mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
mz = rte_memzone_lookup(mz_name);
if (!mz) {
PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == 0) {
+ if (mz_phys_addr == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map addr to phys memory\n");
return -ENOMEM;
rmem->pg_tbl_mz = mz;
}
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x", suffix, idx);
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
+ suffix, idx, bp->eth_dev->data->port_id);
mz = rte_memzone_lookup(mz_name);
if (!mz) {
mz = rte_memzone_reserve_aligned(mz_name,
bp->ctx = NULL;
}
-#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
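+/* Renamed from roundup() to avoid colliding with the macro of the same
+ * name that some system headers define.
+ */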
+#define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define min_t(type, x, y) ({ \
type __min1 = (x); \
return rc;
entries = ctx->qp_max_l2_entries;
- entries = roundup(entries, ctx->tqm_entries_multiple);
+ entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
ctx->tqm_max_entries_per_ring);
for (i = 0, ena = 0; i < BNXT_MAX_Q; i++) {
else
ctx->flags |= BNXT_CTX_FLAG_INITED;
+ return rc;
+}
+
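+/* Reserve (or reuse) the DMA-able memzones the firmware fills with
+ * Rx/Tx port statistics, and wire up the extended-statistics pointers
+ * when the firmware supports them.
+ */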
+static int bnxt_alloc_stats_mem(struct bnxt *bp)
+{
+ struct rte_pci_device *pci_dev = bp->pdev;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz = NULL;
+ uint32_t total_alloc_len;
+ rte_iova_t mz_phys_addr;
+
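+ /* NS2 devices are excluded from port-statistics allocation. */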
+ if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
+ return 0;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "rx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
+ sizeof(struct rx_port_stats_ext) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ PMD_DRV_LOG(WARNING,
+ "Memzone physical address same as virtual.\n");
+ PMD_DRV_LOG(WARNING,
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
+ if (mz_phys_addr == RTE_BAD_IOVA) {
+ PMD_DRV_LOG(ERR,
+ "Can't map address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ bp->rx_mem_zone = (const void *)mz;
+ bp->hw_rx_port_stats = mz->addr;
+ bp->hw_rx_port_stats_map = mz_phys_addr;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "tx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
+ sizeof(struct tx_port_stats_ext) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name,
+ total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ PMD_DRV_LOG(WARNING,
+ "Memzone physical address same as virtual\n");
+ PMD_DRV_LOG(WARNING,
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
+ if (mz_phys_addr == RTE_BAD_IOVA) {
+ PMD_DRV_LOG(ERR,
+ "Can't map address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ bp->tx_mem_zone = (const void *)mz;
+ bp->hw_tx_port_stats = mz->addr;
+ bp->hw_tx_port_stats_map = mz_phys_addr;
+ bp->flags |= BNXT_FLAG_PORT_STATS;
+
+ /* Display extended statistics if FW supports it */
+ if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
+ bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
+ !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
+ return 0;
+
+ bp->hw_rx_port_stats_ext = (void *)
+ ((uint8_t *)bp->hw_rx_port_stats +
+ sizeof(struct rx_port_stats));
+ bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
+ sizeof(struct rx_port_stats);
+ bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
+
+ if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
+ bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
+ bp->hw_tx_port_stats_ext = (void *)
+ ((uint8_t *)bp->hw_tx_port_stats +
+ sizeof(struct tx_port_stats));
+ bp->hw_tx_port_stats_ext_map =
+ bp->hw_tx_port_stats_map +
+ sizeof(struct tx_port_stats);
+ bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
+ }
+
return 0;
}
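+/* Allocate the MAC address table and install the default MAC address:
+ * the permanent address reported by firmware, or a random one for a VF
+ * that was not assigned an address by its PF.
+ */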
+static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ int rc = 0;
+
+ eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
+ RTE_ETHER_ADDR_LEN *
+ bp->max_l2_ctx,
+ 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
+ return -ENOMEM;
+ }
+
+ if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
+ if (BNXT_PF(bp))
+ return -EINVAL;
+
+ /* Generate a random MAC address, if none was assigned by PF */
+ PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
+ bnxt_eth_hw_addr_random(bp->mac_addr);
+ PMD_DRV_LOG(INFO,
+ "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
+ bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
+
+ rc = bnxt_hwrm_set_mac(bp);
+ if (!rc)
+ memcpy(&bp->eth_dev->data->mac_addrs[0], bp->mac_addr,
+ RTE_ETHER_ADDR_LEN);
+ return rc;
+ }
+
+ /* Copy the permanent MAC from the FUNC_QCAPS response */
+ memcpy(bp->mac_addr, bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN);
+	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
+
+ return rc;
+}
+
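+/* Clear a command's bit in the VF request-forwarding bitmap so the
+ * firmware handles it directly instead of forwarding it to the PF
+ * driver.
+ */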
#define ALLOW_FUNC(x) \
{ \
- typeof(x) arg = (x); \
+ uint32_t arg = (x); \
bp->pf.vf_req_fwd[((arg) >> 5)] &= \
~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
}
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- char mz_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz = NULL;
static int version_printed;
- uint32_t total_alloc_len;
- rte_iova_t mz_phys_addr;
struct bnxt *bp;
+ uint16_t mtu;
int rc;
if (version_printed++ == 0)
bp->dev_stopped = 1;
+ eth_dev->dev_ops = &bnxt_dev_ops;
+ eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
+ eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further
+ * as primary has already done this work.
+ */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- goto skip_init;
+ return 0;
if (bnxt_vf_pciid(pci_dev->id.device_id))
bp->flags |= BNXT_FLAG_VF;
if (pci_dev->id.device_id == BROADCOM_DEV_ID_57508 ||
pci_dev->id.device_id == BROADCOM_DEV_ID_57504 ||
pci_dev->id.device_id == BROADCOM_DEV_ID_57502 ||
- pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF)
+ pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF1 ||
+ pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF2)
bp->flags |= BNXT_FLAG_THOR_CHIP;
+ if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
+ pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
+ pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
+ pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
+ bp->flags |= BNXT_FLAG_STINGRAY;
+
rc = bnxt_init_board(eth_dev);
if (rc) {
PMD_DRV_LOG(ERR,
"Board initialization failed rc: %x\n", rc);
goto error;
}
-skip_init:
- eth_dev->dev_ops = &bnxt_dev_ops;
- eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
- eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
- if (pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
- "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
- pci_dev->addr.bus, pci_dev->addr.devid,
- pci_dev->addr.function, "rx_port_stats");
- mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
- mz = rte_memzone_lookup(mz_name);
- total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
- sizeof(struct rx_port_stats) +
- sizeof(struct rx_port_stats_ext) +
- 512);
- if (!mz) {
- mz = rte_memzone_reserve(mz_name, total_alloc_len,
- SOCKET_ID_ANY,
- RTE_MEMZONE_2MB |
- RTE_MEMZONE_SIZE_HINT_ONLY |
- RTE_MEMZONE_IOVA_CONTIG);
- if (mz == NULL)
- return -ENOMEM;
- }
- memset(mz->addr, 0, mz->len);
- mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(INFO,
- "Memzone physical address same as virtual using rte_mem_virt2iova()\n");
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == 0) {
- PMD_DRV_LOG(ERR,
- "unable to map address to physical memory\n");
- return -ENOMEM;
- }
- }
-
- bp->rx_mem_zone = (const void *)mz;
- bp->hw_rx_port_stats = mz->addr;
- bp->hw_rx_port_stats_map = mz_phys_addr;
-
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
- "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
- pci_dev->addr.bus, pci_dev->addr.devid,
- pci_dev->addr.function, "tx_port_stats");
- mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
- mz = rte_memzone_lookup(mz_name);
- total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
- sizeof(struct tx_port_stats) +
- sizeof(struct tx_port_stats_ext) +
- 512);
- if (!mz) {
- mz = rte_memzone_reserve(mz_name,
- total_alloc_len,
- SOCKET_ID_ANY,
- RTE_MEMZONE_2MB |
- RTE_MEMZONE_SIZE_HINT_ONLY |
- RTE_MEMZONE_IOVA_CONTIG);
- if (mz == NULL)
- return -ENOMEM;
- }
- memset(mz->addr, 0, mz->len);
- mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(WARNING,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(WARNING,
- "Using rte_mem_virt2iova()\n");
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == 0) {
- PMD_DRV_LOG(ERR,
- "unable to map address to physical memory\n");
- return -ENOMEM;
- }
- }
- bp->tx_mem_zone = (const void *)mz;
- bp->hw_tx_port_stats = mz->addr;
- bp->hw_tx_port_stats_map = mz_phys_addr;
-
- bp->flags |= BNXT_FLAG_PORT_STATS;
-
- /* Display extended statistics if FW supports it */
- if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
- bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0)
- goto skip_ext_stats;
-
- bp->hw_rx_port_stats_ext = (void *)
- (bp->hw_rx_port_stats + sizeof(struct rx_port_stats));
- bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
- sizeof(struct rx_port_stats);
- bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
-
-
- if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2) {
- bp->hw_tx_port_stats_ext = (void *)
- (bp->hw_tx_port_stats + sizeof(struct tx_port_stats));
- bp->hw_tx_port_stats_ext_map =
- bp->hw_tx_port_stats_map +
- sizeof(struct tx_port_stats);
- bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
- }
- }
-
-skip_ext_stats:
rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
PMD_DRV_LOG(ERR,
PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
goto error_free;
}
+
+ rc = bnxt_alloc_stats_mem(bp);
+ if (rc)
+ goto error_free;
+
if (bp->max_tx_rings == 0) {
PMD_DRV_LOG(ERR, "No TX rings available!\n");
rc = -EBUSY;
goto error_free;
}
- eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
- RTE_ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
- if (eth_dev->data->mac_addrs == NULL) {
- PMD_DRV_LOG(ERR,
- "Failed to alloc %u bytes needed to store MAC addr tbl",
- RTE_ETHER_ADDR_LEN * bp->max_l2_ctx);
- rc = -ENOMEM;
- goto error_free;
- }
- if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
- PMD_DRV_LOG(ERR,
- "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
- bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
- bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
- bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
- rc = -EINVAL;
+ rc = bnxt_setup_mac_addr(eth_dev);
+ if (rc)
goto error_free;
- }
- /* Copy the permanent MAC from the qcap response address now. */
- memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
-	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
/* THOR does not support ring groups.
* But we will use the array to save RSS context IDs.
goto error_free;
}
- bp->grp_info = rte_zmalloc("bnxt_grp_info",
- sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
- if (!bp->grp_info) {
- PMD_DRV_LOG(ERR,
- "Failed to alloc %zu bytes to store group info table\n",
- sizeof(*bp->grp_info) * bp->max_ring_grps);
- rc = -ENOMEM;
- goto error_free;
+ if (BNXT_HAS_RING_GRPS(bp)) {
+ bp->grp_info = rte_zmalloc("bnxt_grp_info",
+ sizeof(*bp->grp_info) *
+ bp->max_ring_grps, 0);
+ if (!bp->grp_info) {
+ PMD_DRV_LOG(ERR,
+ "Failed to alloc %zu bytes for grp info tbl.\n",
+ sizeof(*bp->grp_info) * bp->max_ring_grps);
+ rc = -ENOMEM;
+ goto error_free;
+ }
}
/* Forward all requests if firmware is new enough */
pci_dev->mem_resource[0].phys_addr,
pci_dev->mem_resource[0].addr);
- rc = bnxt_hwrm_func_qcfg(bp);
+ rc = bnxt_hwrm_func_qcfg(bp, &mtu);
if (rc) {
PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
goto error_free;
}
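+ /* Adopt the firmware-reported MTU when it is valid and differs
+ * from the current device MTU.
+ */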
+ if (mtu >= RTE_ETHER_MIN_MTU && mtu <= BNXT_MAX_MTU &&
+ mtu != eth_dev->data->mtu)
+ eth_dev->data->mtu = mtu;
+
if (BNXT_PF(bp)) {
//if (bp->pf.active_vfs) {
// TODO: Deallocate VF resources?
rc = bnxt_alloc_mem(bp);
if (rc)
- goto error_free_int;
+ goto error_free;
+
+ bnxt_init_nic(bp);
rc = bnxt_request_int(bp);
if (rc)
- goto error_free_int;
-
- bnxt_enable_int(bp);
- bnxt_init_nic(bp);
+ goto error_free;
return 0;
-error_free_int:
- bnxt_disable_int(bp);
- bnxt_hwrm_func_buf_unrgtr(bp);
- bnxt_free_int(bp);
- bnxt_free_mem(bp);
error_free:
bnxt_dev_uninit(eth_dev);
error:
bnxt_disable_int(bp);
bnxt_free_int(bp);
bnxt_free_mem(bp);
+
+ bnxt_hwrm_func_buf_unrgtr(bp);
+
if (bp->grp_info != NULL) {
rte_free(bp->grp_info);
bp->grp_info = NULL;
static struct rte_pci_driver bnxt_rte_pmd = {
.id_table = bnxt_pci_id_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
- RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_IOVA_AS_VA,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
.probe = bnxt_pci_probe,
.remove = bnxt_pci_remove,
};