#define BROADCOM_DEV_ID_57407_MF 0x16ea
#define BROADCOM_DEV_ID_57414_MF 0x16ec
#define BROADCOM_DEV_ID_57416_MF 0x16ee
+#define BROADCOM_DEV_ID_57508 0x1750
+#define BROADCOM_DEV_ID_57504 0x1751
+#define BROADCOM_DEV_ID_57502 0x1752
+#define BROADCOM_DEV_ID_57500_VF 0x1807
#define BROADCOM_DEV_ID_58802 0xd802
#define BROADCOM_DEV_ID_58804 0xd804
#define BROADCOM_DEV_ID_58808 0x16f0
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF) },
{ .vendor_id = 0, /* sentinel */ },
};
* High level utility functions
*/
+static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
+{
+ if (!BNXT_CHIP_THOR(bp))
+ return 1;
+
+ return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
+ BNXT_RSS_ENTRIES_PER_CTX_THOR) /
+ BNXT_RSS_ENTRIES_PER_CTX_THOR;
+}
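
For intuition, the helper above is a ceiling division: one RSS context per block of BNXT_RSS_ENTRIES_PER_CTX_THOR rings. A minimal standalone sketch, assuming the per-context entry count is 64 (the value the upstream header defines at this revision):

#include <assert.h>
#include <stdint.h>

#define ENTRIES_PER_CTX 64	/* assumed BNXT_RSS_ENTRIES_PER_CTX_THOR */

static uint16_t rss_ctxts(uint16_t rx_nr_rings)
{
	/* Same as RTE_ALIGN_MUL_CEIL(n, 64) / 64: a ceiling division. */
	return (rx_nr_rings + ENTRIES_PER_CTX - 1) / ENTRIES_PER_CTX;
}

int main(void)
{
	assert(rss_ctxts(1) == 1);	/* 1..64 rings -> one context    */
	assert(rss_ctxts(64) == 1);
	assert(rss_ctxts(65) == 2);	/* 65..128 rings -> two contexts */
	return 0;
}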
+
+static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
+{
+ if (!BNXT_CHIP_THOR(bp))
+ return HW_HASH_INDEX_SIZE;
+
+ return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
+}
+
static void bnxt_free_mem(struct bnxt *bp)
{
bnxt_free_filter_mem(bp);
bp->flags &= ~BNXT_FLAG_JUMBO;
}
+ /* THOR does not support ring groups.
+ * But we will use the array to save RSS context IDs.
+ */
+ if (BNXT_CHIP_THOR(bp))
+ bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
+
rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
if (rc) {
PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
/* Alloc RSS context only if RSS mode is enabled */
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
+ int j, nr_ctxs = bnxt_rss_ctxts(bp);
+
+ rc = 0;
+ for (j = 0; j < nr_ctxs; j++) {
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
+ if (rc)
+ break;
+ }
if (rc) {
PMD_DRV_LOG(ERR,
- "HWRM vnic %d ctx alloc failure rc: %x\n",
- i, rc);
+ "HWRM vnic %d ctx %d alloc failure rc: %x\n",
+ i, j, rc);
goto err_out;
}
+ vnic->num_lb_ctxts = nr_ctxs;
}
/*
"rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
j, rxq->vnic, rxq->vnic->fw_grp_ids);
- if (rxq->rx_deferred_start)
+ if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
}
"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
intr_handle->intr_vec, intr_handle->nb_efd,
intr_handle->max_intr);
- }
-
- for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
- queue_id++) {
- intr_handle->intr_vec[queue_id] = vec;
- if (vec < base + intr_handle->nb_efd - 1)
- vec++;
+ for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
+ queue_id++) {
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
}
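
The re-indentation above moves the queue-to-vector assignment inside the block that allocated intr_vec, so it only runs when MSI-X vectors were actually set up. A small standalone sketch of the mapping it produces, with illustrative values rather than the driver's:

#include <stdio.h>

int main(void)
{
	int base = 1, nb_efd = 3, nb_rxq = 5, vec = base, q;

	for (q = 0; q < nb_rxq; q++) {
		printf("rxq %d -> vector %d\n", q, vec);
		if (vec < base + nb_efd - 1)
			vec++;	/* advance until the last vector */
	}
	/* prints vectors 1, 2, 3, 3, 3: extra queues share the last one */
	return 0;
}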
/* enable uio/vfio intr/eventfd mapping */
static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint16_t max_vnics, i, j, vpool, vrxq;
unsigned int max_rx_rings;
/* For the sake of symmetry, max_rx_queues = max_tx_queues */
dev_info->max_rx_queues = max_rx_rings;
dev_info->max_tx_queues = max_rx_rings;
- dev_info->reta_size = HW_HASH_INDEX_SIZE;
+ dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
dev_info->hash_key_size = 40;
max_vnics = bp->max_vnics;
/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
int rc;
eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
bp->max_cp_rings ||
eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
- bp->max_stat_ctx ||
- (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps ||
- (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
- bp->max_vnics < eth_dev->data->nb_rx_queues)) {
- PMD_DRV_LOG(ERR,
- "Insufficient resources to support requested config\n");
- PMD_DRV_LOG(ERR,
- "Num Queues Requested: Tx %d, Rx %d\n",
- eth_dev->data->nb_tx_queues,
- eth_dev->data->nb_rx_queues);
- PMD_DRV_LOG(ERR,
- "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
- bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
- bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
- return -ENOSPC;
- }
+ bp->max_stat_ctx)
+ goto resource_error;
+
+ if (BNXT_HAS_RING_GRPS(bp) &&
+ (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
+ goto resource_error;
+
+ if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+ bp->max_vnics < eth_dev->data->nb_rx_queues)
+ goto resource_error;
bp->rx_cp_nr_rings = bp->rx_nr_rings;
bp->tx_cp_nr_rings = bp->tx_nr_rings;
bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
}
return 0;
+
+resource_error:
+ PMD_DRV_LOG(ERR,
+ "Insufficient resources to support requested config\n");
+ PMD_DRV_LOG(ERR,
+ "Num Queues Requested: Tx %d, Rx %d\n",
+ eth_dev->data->nb_tx_queues,
+ eth_dev->data->nb_rx_queues);
+ PMD_DRV_LOG(ERR,
+ "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
+ bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
+ bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
+ return -ENOSPC;
}
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
eth_dev->data->port_id);
}
-static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
-{
- bnxt_print_link_info(eth_dev);
- return 0;
-}
-
/*
* Determine whether the current configuration requires support for scattered
* receive; return 1 if scattered receive is required and 0 if not.
return 0;
}
+static eth_rx_burst_t
+bnxt_receive_function(__rte_unused struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_ARCH_X86
+ /*
+ * Vector mode receive can be enabled only if scatter rx is not
+ * in use and rx offloads are limited to VLAN stripping/filtering,
+ * CRC keep, jumbo frames, and L3/L4 checksums (the mask below).
+ */
+ if (!eth_dev->data->scattered_rx &&
+ !(eth_dev->data->dev_conf.rxmode.offloads &
+ ~(DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_FILTER))) {
+ PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
+ eth_dev->data->port_id);
+ return bnxt_recv_pkts_vec;
+ }
+ PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
+ eth_dev->data->port_id);
+ PMD_DRV_LOG(INFO,
+ "Port %d scatter: %d rx offload: %" PRIX64 "\n",
+ eth_dev->data->port_id,
+ eth_dev->data->scattered_rx,
+ eth_dev->data->dev_conf.rxmode.offloads);
+#endif
+ return bnxt_recv_pkts;
+}
+
+static eth_tx_burst_t
+bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_ARCH_X86
+ /*
+ * Vector mode transmit can be enabled only if scatter rx is not
+ * in use and tx offloads other than VLAN insertion are not
+ * in use.
+ */
+ if (!eth_dev->data->scattered_rx &&
+ !(eth_dev->data->dev_conf.txmode.offloads &
+ ~DEV_TX_OFFLOAD_VLAN_INSERT)) {
+ PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
+ eth_dev->data->port_id);
+ return bnxt_xmit_pkts_vec;
+ }
+ PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
+ eth_dev->data->port_id);
+ PMD_DRV_LOG(INFO,
+ "Port %d scatter: %d tx offload: %" PRIX64 "\n",
+ eth_dev->data->port_id,
+ eth_dev->data->scattered_rx,
+ eth_dev->data->dev_conf.txmode.offloads);
+#endif
+ return bnxt_xmit_pkts;
+}
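
Both selectors rely on the same gating idiom: `!(offloads & ~ALLOWED)` is true exactly when no offload bit outside the permitted mask is enabled. A minimal sketch with hypothetical flag values, not DPDK's DEV_TX_OFFLOAD_* constants:

#include <stdint.h>
#include <stdio.h>

#define OFFLOAD_VLAN_INSERT (1ULL << 0)	/* hypothetical flag values */
#define OFFLOAD_TSO         (1ULL << 1)

int main(void)
{
	uint64_t allowed = OFFLOAD_VLAN_INSERT;
	uint64_t enabled = OFFLOAD_VLAN_INSERT | OFFLOAD_TSO;

	/* True only when every enabled bit is inside the allowed mask. */
	if (!(enabled & ~allowed))
		printf("vector path\n");
	else
		printf("scalar path\n");	/* TSO falls outside the mask */
	return 0;
}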
+
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
int vlan_mask = 0;
int rc;
if (rc)
goto error;
+ eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
+ eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
bp->flags |= BNXT_FLAG_INIT_DONE;
return 0;
static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
int rc = 0;
if (!bp->link_info.link_up)
static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
eth_dev->data->dev_link.link_status = 0;
bnxt_set_hwrm_link_config(bp, false);
/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
bp->flags &= ~BNXT_FLAG_INIT_DONE;
if (bp->eth_dev->data->dev_started) {
static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
if (bp->dev_stopped == 0)
bnxt_dev_stop_op(eth_dev);
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
uint32_t index)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
struct bnxt_vnic_info *vnic;
struct bnxt_filter_info *filter, *temp_filter;
struct rte_ether_addr *mac_addr,
uint32_t index, uint32_t pool)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
struct bnxt_filter_info *filter;
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
int rc = 0;
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_link new;
unsigned int cnt = BNXT_LINK_WAIT_CNT;
static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic;
if (bp->vnic_info == NULL)
static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic;
if (bp->vnic_info == NULL)
static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic;
if (bp->vnic_info == NULL)
static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic;
if (bp->vnic_info == NULL)
bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
+/* Return bnxt_rx_queue pointer corresponding to a given rxq. */
+static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
+{
+ if (qid >= bp->rx_nr_rings)
+ return NULL;
+
+ return bp->eth_dev->data->rx_queues[qid];
+}
+
+/* Return rxq corresponding to a given rss table ring/group ID. */
+static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
+{
+ struct bnxt_rx_queue *rxq;
+ unsigned int i;
+
+ if (!BNXT_HAS_RING_GRPS(bp)) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ rxq = bp->eth_dev->data->rx_queues[i];
+ if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
+ return rxq->index;
+ }
+ } else {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ if (bp->grp_info[i].fw_grp_id == fwr)
+ return i;
+ }
+ }
+
+ return INVALID_HW_RING_ID;
+}
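
bnxt_rss_to_qid() is the inverse of the update path: on Thor the RSS table holds firmware ring IDs, so the query side has to scan the rx queues for a matching fw_ring_id rather than index a ring-group array. A standalone sketch of that reverse lookup, with illustrative names:

#include <stdint.h>

#define INVALID_ID UINT16_MAX	/* stands in for INVALID_HW_RING_ID */

/* Scan the per-queue firmware ring IDs for 'fwr'; return the queue
 * index on a match, INVALID_ID otherwise. */
static uint16_t fw_id_to_qid(const uint16_t *fw_ids, uint16_t n, uint16_t fwr)
{
	uint16_t i;

	for (i = 0; i < n; i++)
		if (fw_ids[i] == fwr)
			return i;
	return INVALID_ID;
}

int main(void)
{
	const uint16_t fw_ids[] = { 17, 42, 99 };

	return fw_id_to_qid(fw_ids, 3, 42) == 1 ? 0 : 1;
}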
+
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
- struct bnxt_vnic_info *vnic;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
+ uint16_t idx, sft;
int i;
+ if (!vnic->rss_table)
+ return -EINVAL;
+
if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
return -EINVAL;
- if (reta_size != HW_HASH_INDEX_SIZE) {
+ if (reta_size != tbl_size) {
PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
- "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+ "(%d)\n", reta_size, tbl_size);
return -EINVAL;
}
- /* Update the RSS VNIC(s) */
- for (i = 0; i < bp->max_vnics; i++) {
- vnic = &bp->vnic_info[i];
- memcpy(vnic->rss_table, reta_conf, reta_size);
- bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+
+ for (i = 0; i < reta_size; i++) {
+ struct bnxt_rx_queue *rxq;
+
+ idx = i / RTE_RETA_GROUP_SIZE;
+ sft = i % RTE_RETA_GROUP_SIZE;
+
+ if (!(reta_conf[idx].mask & (1ULL << sft)))
+ continue;
+
+ rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
+ return -EINVAL;
+ }
+
+ if (BNXT_CHIP_THOR(bp)) {
+ vnic->rss_table[i * 2] =
+ rxq->rx_ring->rx_ring_struct->fw_ring_id;
+ vnic->rss_table[i * 2 + 1] =
+ rxq->cp_ring->cp_ring_struct->fw_ring_id;
+ } else {
+ vnic->rss_table[i] =
+ vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
+ }
}
+
+ bnxt_hwrm_vnic_rss_cfg(bp, vnic);
return 0;
}
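
On Thor each RETA bucket therefore occupies two consecutive rss_table slots: the rx ring firmware ID at 2*i and the companion completion ring ID at 2*i + 1. A standalone sketch of that layout, where the 64-bucket size is an assumption matching BNXT_RSS_ENTRIES_PER_CTX_THOR above:

#include <stdint.h>

/* 64 buckets per context is an assumption matching
 * BNXT_RSS_ENTRIES_PER_CTX_THOR in the helpers above. */
struct thor_rss_pair_tbl {
	uint16_t slot[2 * 64];
};

static void set_bucket(struct thor_rss_pair_tbl *t, int i,
		       uint16_t rx_fw_id, uint16_t cp_fw_id)
{
	t->slot[i * 2] = rx_fw_id;	/* rx ring firmware ID         */
	t->slot[i * 2 + 1] = cp_fw_id;	/* completion ring firmware ID */
}

int main(void)
{
	struct thor_rss_pair_tbl t = { {0} };

	set_bucket(&t, 5, 17, 33);
	return t.slot[10] == 17 && t.slot[11] == 33 ? 0 : 1;
}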
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- struct rte_intr_handle *intr_handle
- = &bp->pdev->intr_handle;
+ uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
+ uint16_t idx, sft, i;
/* Retrieve from the default VNIC */
if (!vnic)
if (!vnic->rss_table)
return -EINVAL;
- if (reta_size != HW_HASH_INDEX_SIZE) {
+ if (reta_size != tbl_size) {
PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
- "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+ "(%d)\n", reta_size, tbl_size);
return -EINVAL;
}
- /* EW - need to revisit here copying from uint64_t to uint16_t */
- memcpy(reta_conf, vnic->rss_table, reta_size);
- if (rte_intr_allow_others(intr_handle)) {
- if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
- bnxt_dev_lsc_intr_setup(eth_dev);
+ for (idx = 0, i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ sft = i % RTE_RETA_GROUP_SIZE;
+
+ if (reta_conf[idx].mask & (1ULL << sft)) {
+ uint16_t qid;
+
+ if (BNXT_CHIP_THOR(bp))
+ qid = bnxt_rss_to_qid(bp,
+ vnic->rss_table[i * 2]);
+ else
+ qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
+
+ if (qid == INVALID_HW_RING_ID) {
+ PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
+ return -EINVAL;
+ }
+ reta_conf[idx].reta[sft] = qid;
+ }
}
return 0;
static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_vnic_info *vnic;
uint16_t hash_type = 0;
static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
int len;
uint32_t hash_types;
static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct rte_eth_link link_info;
int rc;
static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *udp_tunnel)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint16_t tunnel_type = 0;
int rc = 0;
bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *udp_tunnel)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint16_t tunnel_type = 0;
uint16_t port = 0;
int rc = 0;
static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
uint16_t vlan_id, int on)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
/* These operations apply to ALL existing MAC/VLAN filters */
if (on)
static int
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
unsigned int i;
bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
struct rte_ether_addr *addr)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
/* Default Filter is tied to VNIC 0 */
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
struct bnxt_filter_info *filter;
struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
char *mc_addr_list = (char *)mc_addr_set;
struct bnxt_vnic_info *vnic;
uint32_t off = 0, i = 0;
static int
bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
{
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_dev_info dev_info;
+ uint32_t new_pkt_size;
uint32_t rc = 0;
uint32_t i;
+ new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
+ VLAN_TAG_SIZE * BNXT_NUM_VLANS;
+
bnxt_dev_info_get_op(eth_dev, &dev_info);
if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
return -EINVAL;
}
+#ifdef RTE_ARCH_X86
+ /*
+ * If vector-mode tx/rx is active, disallow any MTU change that would
+ * require scattered receive support.
+ */
+ if (eth_dev->data->dev_started &&
+ (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec ||
+ eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) &&
+ (new_pkt_size >
+ eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+ PMD_DRV_LOG(ERR,
+ "MTU change would require scattered rx support. ");
+ PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
+ return -EINVAL;
+ }
+#endif
+
if (new_mtu > RTE_ETHER_MTU) {
bp->flags |= BNXT_FLAG_JUMBO;
bp->eth_dev->data->dev_conf.rxmode.offloads |=
bp->flags &= ~BNXT_FLAG_JUMBO;
}
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
- new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
- VLAN_TAG_SIZE * 2;
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
eth_dev->data->mtu = new_mtu;
PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
static int
bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
uint16_t vlan = bp->vlan;
int rc;
static int
bnxt_dev_led_on_op(struct rte_eth_dev *dev)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
return bnxt_hwrm_port_led_cfg(bp, true);
}
static int
bnxt_dev_led_off_op(struct rte_eth_dev *dev)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
return bnxt_hwrm_port_led_cfg(bp, false);
}
enum rte_filter_op filter_op,
void *arg)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct rte_eth_ethertype_filter *efilter =
(struct rte_eth_ethertype_filter *)arg;
struct bnxt_filter_info *bfilter, *filter1;
enum rte_filter_op filter_op,
void *arg)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
int ret;
if (filter_op == RTE_ETH_FILTER_NOP)
enum rte_filter_op filter_op,
void *arg)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
struct bnxt_filter_info *filter, *match;
struct bnxt_vnic_info *vnic, *mvnic;
RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == bnxt_recv_pkts)
- return ptypes;
- return NULL;
+ if (!dev->rx_pkt_burst)
+ return NULL;
+
+ return ptypes;
}
static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
uint64_t ns;
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
uint64_t ns, systime_cycles;
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
static int
bnxt_timesync_enable(struct rte_eth_dev *dev)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
uint32_t shift = 0;
static int
bnxt_timesync_disable(struct rte_eth_dev *dev)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
struct timespec *timestamp,
uint32_t flags __rte_unused)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
uint64_t rx_tstamp_cycles = 0;
uint64_t ns;
bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
struct timespec *timestamp)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
uint64_t tx_tstamp_cycles = 0;
uint64_t ns;
static int
bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
static int
bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
int rc;
uint32_t dir_entries;
uint32_t entry_length;
bnxt_get_eeprom_op(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *in_eeprom)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
uint32_t index;
uint32_t offset;
bnxt_set_eeprom_op(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *in_eeprom)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
uint8_t index, dir_op;
uint16_t type, ext, ordinal, attr;
id == BROADCOM_DEV_ID_57414_VF ||
id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||
- id == BROADCOM_DEV_ID_58802_VF)
+ id == BROADCOM_DEV_ID_58802_VF ||
+ id == BROADCOM_DEV_ID_57500_VF)
return true;
return false;
}
return rc;
}
+static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg,
+ uint32_t mem_size,
+ const char *suffix,
+ uint16_t idx)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ const struct rte_memzone *mz = NULL;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ rte_iova_t mz_phys_addr;
+ uint64_t valid_bits = 0;
+ uint32_t sz;
+ int i;
+
+ if (!mem_size)
+ return 0;
+
+ rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
+ BNXT_PAGE_SIZE;
+ rmem->page_size = BNXT_PAGE_SIZE;
+ rmem->pg_arr = ctx_pg->ctx_pg_arr;
+ rmem->dma_arr = ctx_pg->ctx_dma_arr;
+ rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
+
+ valid_bits = PTU_PTE_VALID;
+
+ if (rmem->nr_pages > 1) {
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_tbl%s_%x",
+ suffix, idx);
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ if (!mz) {
+ mz = rte_memzone_reserve_aligned(mz_name,
+ rmem->nr_pages * 8,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG,
+ BNXT_PAGE_SIZE);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ PMD_DRV_LOG(WARNING,
+ "Memzone physical address same as virtual.\n");
+ PMD_DRV_LOG(WARNING,
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
+ if (mz_phys_addr == RTE_BAD_IOVA) {
+ PMD_DRV_LOG(ERR,
+ "unable to map addr to phys memory\n");
+ return -ENOMEM;
+ }
+ }
+ rte_mem_lock_page(((char *)mz->addr));
+
+ rmem->pg_tbl = mz->addr;
+ rmem->pg_tbl_map = mz_phys_addr;
+ rmem->pg_tbl_mz = mz;
+ }
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x", suffix, idx);
+ mz = rte_memzone_lookup(mz_name);
+ if (!mz) {
+ mz = rte_memzone_reserve_aligned(mz_name,
+ mem_size,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_1GB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG,
+ BNXT_PAGE_SIZE);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ PMD_DRV_LOG(WARNING,
+ "Memzone physical address same as virtual.\n");
+ PMD_DRV_LOG(WARNING,
+ "Using rte_mem_virt2iova()\n");
+ for (sz = 0; sz < mem_size; sz += BNXT_PAGE_SIZE)
+ rte_mem_lock_page(((char *)mz->addr) + sz);
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
+ if (mz_phys_addr == RTE_BAD_IOVA) {
+ PMD_DRV_LOG(ERR,
+ "unable to map addr to phys memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
+ rte_mem_lock_page(((char *)mz->addr) + sz);
+ rmem->pg_arr[i] = ((char *)mz->addr) + sz;
+ rmem->dma_arr[i] = mz_phys_addr + sz;
+
+ if (rmem->nr_pages > 1) {
+ if (i == rmem->nr_pages - 2 &&
+ (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+ valid_bits |= PTU_PTE_NEXT_TO_LAST;
+ else if (i == rmem->nr_pages - 1 &&
+ (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+ valid_bits |= PTU_PTE_LAST;
+
+ rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
+ valid_bits);
+ }
+ }
+
+ rmem->mz = mz;
+ if (rmem->vmem_size)
+ rmem->vmem = (void **)mz->addr;
+ rmem->dma_arr[0] = mz_phys_addr;
+ return 0;
+}
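
The paging math at the top of bnxt_alloc_ctx_mem_blk() splits the backing store into BNXT_PAGE_SIZE pages and, when more than one page is needed, reserves a page table at 8 bytes per entry. A standalone check of that arithmetic; the 4 KiB page size is an assumption, not necessarily BNXT_PAGE_SIZE:

#include <assert.h>
#include <stdint.h>

#define PAGE_SZ 4096u	/* assumed page size, stands in for BNXT_PAGE_SIZE */

int main(void)
{
	uint32_t mem_size = 10000;	/* illustrative backing-store size */
	uint32_t nr_pages = (mem_size + PAGE_SZ - 1) / PAGE_SZ;

	assert(nr_pages == 3);		/* ceil(10000 / 4096) */
	assert(nr_pages * 8 == 24);	/* bytes reserved for the page table */
	return 0;
}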
+
+static void bnxt_free_ctx_mem(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
+ return;
+
+ bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+ rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
+ rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
+ rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
+ rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
+ rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
+
+ for (i = 0; i < BNXT_MAX_Q; i++) {
+ if (bp->ctx->tqm_mem[i])
+ rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
+ }
+
+ rte_free(bp->ctx);
+ bp->ctx = NULL;
+}
+
+#define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1 : __max2; })
+
+#define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max)
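
These helpers combine in the TQM sizing below: the qp_max_l2_entries count is rounded up to the firmware's entry multiple, then clamped into the per-ring [min, max] window. A usage sketch with illustrative numbers; the macros are copied from above and rely on GCC statement expressions, as DPDK does:

#include <assert.h>
#include <stdint.h>

#define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define min_t(type, x, y) ({ type _a = (x), _b = (y); _a < _b ? _a : _b; })
#define max_t(type, x, y) ({ type _a = (x), _b = (y); _a > _b ? _a : _b; })
#define clamp_t(type, v, lo, hi) min_t(type, max_t(type, v, lo), hi)

int main(void)
{
	/* e.g. qp_max_l2_entries = 1000, multiple = 64, window [128, 768] */
	uint32_t entries = bnxt_roundup(1000u, 64u);		/* -> 1024 */

	entries = clamp_t(uint32_t, entries, 128u, 768u);	/* -> 768 */
	assert(entries == 768);
	return 0;
}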
+
+int bnxt_alloc_ctx_mem(struct bnxt *bp)
+{
+ struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_info *ctx;
+ uint32_t mem_size, ena, entries;
+ int i, rc;
+
+ rc = bnxt_hwrm_func_backing_store_qcaps(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
+ return rc;
+ }
+ ctx = bp->ctx;
+ if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
+ return 0;
+
+ ctx_pg = &ctx->qp_mem;
+ ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
+ mem_size = ctx->qp_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->srq_mem;
+ ctx_pg->entries = ctx->srq_max_l2_entries;
+ mem_size = ctx->srq_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->cq_mem;
+ ctx_pg->entries = ctx->cq_max_l2_entries;
+ mem_size = ctx->cq_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->vnic_mem;
+ ctx_pg->entries = ctx->vnic_max_vnic_entries +
+ ctx->vnic_max_ring_table_entries;
+ mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->stat_mem;
+ ctx_pg->entries = ctx->stat_max_entries;
+ mem_size = ctx->stat_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
+ if (rc)
+ return rc;
+
+ entries = ctx->qp_max_l2_entries;
+ entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
+ entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
+ ctx->tqm_max_entries_per_ring);
+ for (i = 0, ena = 0; i < BNXT_MAX_Q; i++) {
+ ctx_pg = ctx->tqm_mem[i];
+ /* use min tqm entries for now. */
+ ctx_pg->entries = entries;
+ mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
+ if (rc)
+ return rc;
+ ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
+ }
+
+ ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
+ rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+ if (rc)
+ PMD_DRV_LOG(ERR,
+ "Failed to configure context mem: rc = %d\n", rc);
+ else
+ ctx->flags |= BNXT_CTX_FLAG_INITED;
+
+ return 0;
+}
#define ALLOW_FUNC(x) \
{ \
- typeof(x) arg = (x); \
+ uint32_t arg = (x); \
bp->pf.vf_req_fwd[((arg) >> 5)] &= \
~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
}
uint32_t total_alloc_len;
rte_iova_t mz_phys_addr;
struct bnxt *bp;
+ uint16_t mtu;
int rc;
if (version_printed++ == 0)
if (bnxt_vf_pciid(pci_dev->id.device_id))
bp->flags |= BNXT_FLAG_VF;
+ if (pci_dev->id.device_id == BROADCOM_DEV_ID_57508 ||
+ pci_dev->id.device_id == BROADCOM_DEV_ID_57504 ||
+ pci_dev->id.device_id == BROADCOM_DEV_ID_57502 ||
+ pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF)
+ bp->flags |= BNXT_FLAG_THOR_CHIP;
+
rc = bnxt_init_board(eth_dev);
if (rc) {
PMD_DRV_LOG(ERR,
goto skip_ext_stats;
bp->hw_rx_port_stats_ext = (void *)
- (bp->hw_rx_port_stats + sizeof(struct rx_port_stats));
+ ((uint8_t *)bp->hw_rx_port_stats +
+ sizeof(struct rx_port_stats));
bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
sizeof(struct rx_port_stats);
bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2) {
bp->hw_tx_port_stats_ext = (void *)
- (bp->hw_tx_port_stats + sizeof(struct tx_port_stats));
+ ((uint8_t *)bp->hw_tx_port_stats +
+ sizeof(struct tx_port_stats));
bp->hw_tx_port_stats_ext_map =
bp->hw_tx_port_stats_map +
sizeof(struct tx_port_stats);
rc = bnxt_hwrm_ver_get(bp);
if (rc)
goto error_free;
- rc = bnxt_hwrm_queue_qportcfg(bp);
+
+ rc = bnxt_hwrm_func_reset(bp);
if (rc) {
- PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
+ PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
+ rc = -EIO;
goto error_free;
}
- rc = bnxt_hwrm_func_qcfg(bp);
+ rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
- PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
+ PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
goto error_free;
}
-
/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
- if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
+ /* THOR does not support ring groups.
+ * But we will use the array to save RSS context IDs.
+ */
+ if (BNXT_CHIP_THOR(bp)) {
+ bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
+ } else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
/* 1 ring is for default completion ring */
PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
rc = -ENOSPC;
pci_dev->mem_resource[0].phys_addr,
pci_dev->mem_resource[0].addr);
- rc = bnxt_hwrm_func_reset(bp);
+ rc = bnxt_hwrm_func_qcfg(bp, &mtu);
if (rc) {
- PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
- rc = -EIO;
+ PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
goto error_free;
}
+ if (mtu >= RTE_ETHER_MIN_MTU && mtu <= BNXT_MAX_MTU &&
+ mtu != eth_dev->data->mtu)
+ eth_dev->data->mtu = mtu;
+
if (BNXT_PF(bp)) {
//if (bp->pf.active_vfs) {
// TODO: Deallocate VF resources?
bnxt_dev_close_op(eth_dev);
if (bp->pf.vf_info)
rte_free(bp->pf.vf_info);
+ bnxt_free_ctx_mem(bp);
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;