static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint16_t max_vnics, i, j, vpool, vrxq;
unsigned int max_rx_rings;
/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
int rc;
eth_dev->data->port_id);
}
-static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
-{
- bnxt_print_link_info(eth_dev);
- return 0;
-}
-
/*
* Determine whether the current configuration requires support for scattered
* receive; return 1 if scattered receive is required and 0 if not.
return 0;
}
+static eth_rx_burst_t
+bnxt_receive_function(__rte_unused struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_ARCH_X86
+ /*
+ * Vector mode receive can be enabled only if scattered rx is not
+ * in use and rx offloads are limited to VLAN stripping, CRC
+ * stripping, jumbo frames, checksum offloads, and VLAN filtering.
+ * Masking out the supported bits must leave nothing set.
+ */
+ if (!eth_dev->data->scattered_rx &&
+ !(eth_dev->data->dev_conf.rxmode.offloads &
+ ~(DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_FILTER))) {
+ PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
+ eth_dev->data->port_id);
+ return bnxt_recv_pkts_vec;
+ }
+ PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
+ eth_dev->data->port_id);
+ PMD_DRV_LOG(INFO,
+ "Port %d scatter: %d rx offload: %" PRIX64 "\n",
+ eth_dev->data->port_id,
+ eth_dev->data->scattered_rx,
+ eth_dev->data->dev_conf.rxmode.offloads);
+#endif
+ return bnxt_recv_pkts;
+}
+
+static eth_tx_burst_t
+bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_ARCH_X86
+ /*
+ * Vector mode transmit can be enabled only if scattered rx is not
+ * in use and no tx offloads other than VLAN insertion are enabled.
+ */
+ if (!eth_dev->data->scattered_rx &&
+ !(eth_dev->data->dev_conf.txmode.offloads &
+ ~DEV_TX_OFFLOAD_VLAN_INSERT)) {
+ PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
+ eth_dev->data->port_id);
+ return bnxt_xmit_pkts_vec;
+ }
+ PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
+ eth_dev->data->port_id);
+ PMD_DRV_LOG(INFO,
+ "Port %d scatter: %d tx offload: %" PRIX64 "\n",
+ eth_dev->data->port_id,
+ eth_dev->data->scattered_rx,
+ eth_dev->data->dev_conf.txmode.offloads);
+#endif
+ return bnxt_xmit_pkts;
+}
+
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
int vlan_mask = 0;
int rc;
if (rc)
goto error;
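+ /*
+ * Choose vector or scalar burst handlers based on the scattered_rx
+ * state and the configured rx/tx offloads.
+ */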
+ eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
+ eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
bp->flags |= BNXT_FLAG_INIT_DONE;
return 0;
static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
int rc = 0;
if (!bp->link_info.link_up)
static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
eth_dev->data->dev_link.link_status = 0;
bnxt_set_hwrm_link_config(bp, false);
/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
bp->flags &= ~BNXT_FLAG_INIT_DONE;
if (bp->eth_dev->data->dev_started) {
static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
if (bp->dev_stopped == 0)
bnxt_dev_stop_op(eth_dev);
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
uint32_t index)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
struct bnxt_vnic_info *vnic;
struct bnxt_filter_info *filter, *temp_filter;
struct rte_ether_addr *mac_addr,
uint32_t index, uint32_t pool)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
struct bnxt_filter_info *filter;
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
int rc = 0;
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_link new;
unsigned int cnt = BNXT_LINK_WAIT_CNT;
static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic;
if (bp->vnic_info == NULL)
static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic;
if (bp->vnic_info == NULL)
static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic;
if (bp->vnic_info == NULL)
static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic;
if (bp->vnic_info == NULL)
bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
+/* Return the bnxt_rx_queue pointer corresponding to a given queue index. */
+static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
+{
+ if (qid >= bp->rx_nr_rings)
+ return NULL;
+
+ return bp->eth_dev->data->rx_queues[qid];
+}
+
+/* Return the rx queue index corresponding to a given RSS table ring/group ID. */
+static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
+{
+ unsigned int i;
+
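+ /*
+ * Reverse lookup: scan the ring group table for the rx ring whose
+ * firmware group ID matches.
+ */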
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ if (bp->grp_info[i].fw_grp_id == fwr)
+ return i;
+ }
+
+ return INVALID_HW_RING_ID;
+}
+
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
- struct bnxt_vnic_info *vnic;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ uint16_t tbl_size = HW_HASH_INDEX_SIZE;
+ uint16_t idx, sft;
int i;
+ if (!vnic->rss_table)
+ return -EINVAL;
+
if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
return -EINVAL;
- if (reta_size != HW_HASH_INDEX_SIZE) {
+ if (reta_size != tbl_size) {
PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
- "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+ "(%d)\n", reta_size, tbl_size);
return -EINVAL;
}
- /* Update the RSS VNIC(s) */
- for (i = 0; i < bp->max_vnics; i++) {
- vnic = &bp->vnic_info[i];
- memcpy(vnic->rss_table, reta_conf, reta_size);
- bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+
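+ /*
+ * Each 64-entry reta group has a mask selecting which entries to
+ * update; translate each selected queue ID to the firmware group
+ * ID for that ring.
+ */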
+ for (i = 0; i < reta_size; i++) {
+ struct bnxt_rx_queue *rxq;
+
+ idx = i / RTE_RETA_GROUP_SIZE;
+ sft = i % RTE_RETA_GROUP_SIZE;
+
+ if (!(reta_conf[idx].mask & (1ULL << sft)))
+ continue;
+
+ rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
+ return -EINVAL;
+ }
+
+ vnic->rss_table[i] =
+ vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
}
+
+ bnxt_hwrm_vnic_rss_cfg(bp, vnic);
return 0;
}
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- struct rte_intr_handle *intr_handle
- = &bp->pdev->intr_handle;
+ uint16_t tbl_size = HW_HASH_INDEX_SIZE;
+ uint16_t idx, sft, i;
/* Retrieve from the default VNIC */
if (!vnic)
if (!vnic->rss_table)
return -EINVAL;
- if (reta_size != HW_HASH_INDEX_SIZE) {
+ if (reta_size != tbl_size) {
PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
- "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+ "(%d)\n", reta_size, tbl_size);
return -EINVAL;
}
- /* EW - need to revisit here copying from uint64_t to uint16_t */
- memcpy(reta_conf, vnic->rss_table, reta_size);
- if (rte_intr_allow_others(intr_handle)) {
- if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
- bnxt_dev_lsc_intr_setup(eth_dev);
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ sft = i % RTE_RETA_GROUP_SIZE;
+
+ if (reta_conf[idx].mask & (1ULL << sft)) {
+ uint16_t qid;
+
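+ /*
+ * The RSS table holds firmware group IDs; map each back to
+ * the queue index it was derived from.
+ */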
+ qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
+
+ if (qid == INVALID_HW_RING_ID) {
+ PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
+ return -EINVAL;
+ }
+ reta_conf[idx].reta[sft] = qid;
+ }
}
return 0;
static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_vnic_info *vnic;
uint16_t hash_type = 0;
static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
int len;
uint32_t hash_types;
static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct rte_eth_link link_info;
int rc;
static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *udp_tunnel)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint16_t tunnel_type = 0;
int rc = 0;
bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *udp_tunnel)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint16_t tunnel_type = 0;
uint16_t port = 0;
int rc = 0;
static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
uint16_t vlan_id, int on)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
/* These operations apply to ALL existing MAC/VLAN filters */
if (on)
static int
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
unsigned int i;
bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
struct rte_ether_addr *addr)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
/* Default Filter is tied to VNIC 0 */
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
struct bnxt_filter_info *filter;
struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
char *mc_addr_list = (char *)mc_addr_set;
struct bnxt_vnic_info *vnic;
uint32_t off = 0, i = 0;
static int
bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
{
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_dev_info dev_info;
+ uint32_t new_pkt_size;
uint32_t rc = 0;
uint32_t i;
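+ /*
+ * On-wire frame size for the requested MTU: L2 header plus CRC
+ * plus room for BNXT_NUM_VLANS VLAN tags.
+ */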
+ new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
+ VLAN_TAG_SIZE * BNXT_NUM_VLANS;
+
bnxt_dev_info_get_op(eth_dev, &dev_info);
if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
return -EINVAL;
}
+#ifdef RTE_ARCH_X86
+ /*
+ * If vector-mode tx/rx is active, disallow any MTU change that would
+ * require scattered receive support.
+ */
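+ /*
+ * Vector receive does not chain mbufs, so the new frame size must
+ * fit in a single rx buffer (min_rx_buf_size less headroom).
+ */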
+ if (eth_dev->data->dev_started &&
+ (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec ||
+ eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) &&
+ (new_pkt_size >
+ eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+ PMD_DRV_LOG(ERR,
+ "MTU change would require scattered rx support. ");
+ PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
+ return -EINVAL;
+ }
+#endif
+
if (new_mtu > RTE_ETHER_MTU) {
bp->flags |= BNXT_FLAG_JUMBO;
bp->eth_dev->data->dev_conf.rxmode.offloads |=
bp->flags &= ~BNXT_FLAG_JUMBO;
}
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
- new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
- VLAN_TAG_SIZE * 2;
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
eth_dev->data->mtu = new_mtu;
PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
static int
bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
uint16_t vlan = bp->vlan;
int rc;
static int
bnxt_dev_led_on_op(struct rte_eth_dev *dev)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
return bnxt_hwrm_port_led_cfg(bp, true);
}
static int
bnxt_dev_led_off_op(struct rte_eth_dev *dev)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
return bnxt_hwrm_port_led_cfg(bp, false);
}
enum rte_filter_op filter_op,
void *arg)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct rte_eth_ethertype_filter *efilter =
(struct rte_eth_ethertype_filter *)arg;
struct bnxt_filter_info *bfilter, *filter1;
enum rte_filter_op filter_op,
void *arg)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
int ret;
if (filter_op == RTE_ETH_FILTER_NOP)
enum rte_filter_op filter_op,
void *arg)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
struct bnxt_filter_info *filter, *match;
struct bnxt_vnic_info *vnic, *mvnic;
RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == bnxt_recv_pkts)
- return ptypes;
- return NULL;
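+ /*
+ * All bnxt receive handlers (scalar and vector) deliver the same
+ * packet types, so report ptypes whenever a handler is set.
+ */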
+ if (!dev->rx_pkt_burst)
+ return NULL;
+
+ return ptypes;
}
static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
uint64_t ns;
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
uint64_t ns, systime_cycles;
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
static int
bnxt_timesync_enable(struct rte_eth_dev *dev)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
uint32_t shift = 0;
static int
bnxt_timesync_disable(struct rte_eth_dev *dev)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
struct timespec *timestamp,
uint32_t flags __rte_unused)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
uint64_t rx_tstamp_cycles = 0;
uint64_t ns;
bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
struct timespec *timestamp)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
uint64_t tx_tstamp_cycles = 0;
uint64_t ns;
static int
bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
static int
bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
int rc;
uint32_t dir_entries;
uint32_t entry_length;
bnxt_get_eeprom_op(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *in_eeprom)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
uint32_t index;
uint32_t offset;
bnxt_set_eeprom_op(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *in_eeprom)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
uint8_t index, dir_op;
uint16_t type, ext, ordinal, attr;