diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 6d2a672..61ae799 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -441,7 +441,7 @@ static int bnxt_init_nic(struct bnxt *bp)
 static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
                                  struct rte_eth_dev_info *dev_info)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        uint16_t max_vnics, i, j, vpool, vrxq;
        unsigned int max_rx_rings;
 
@@ -537,7 +537,7 @@ found:
 /* Configure the device based on the configuration provided */
 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        int rc;
 
@@ -618,15 +618,90 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
                        eth_dev->data->port_id);
 }
 
-static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
+/*
+ * Determine whether the current configuration requires support for scattered
+ * receive; return 1 if scattered receive is required and 0 if not.
+ */
+static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
 {
-       bnxt_print_link_info(eth_dev);
+       uint16_t buf_size;
+       int i;
+
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+               struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];
+
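+               /* Usable data area of one rx mbuf: the pool's room size less the headroom. */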
+               buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+                                     RTE_PKTMBUF_HEADROOM);
+               if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
+                       return 1;
+       }
        return 0;
 }
 
+static eth_rx_burst_t
+bnxt_receive_function(__rte_unused struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_ARCH_X86
+       /*
+        * Vector mode receive can be enabled only if scatter rx is not in
+        * use and the rx offloads in use are limited to VLAN strip/filter,
+        * CRC keep, jumbo frame, and IPv4/UDP/TCP/outer-IPv4 checksums.
+        */
+       if (!eth_dev->data->scattered_rx &&
+           !(eth_dev->data->dev_conf.rxmode.offloads &
+             ~(DEV_RX_OFFLOAD_VLAN_STRIP |
+               DEV_RX_OFFLOAD_KEEP_CRC |
+               DEV_RX_OFFLOAD_JUMBO_FRAME |
+               DEV_RX_OFFLOAD_IPV4_CKSUM |
+               DEV_RX_OFFLOAD_UDP_CKSUM |
+               DEV_RX_OFFLOAD_TCP_CKSUM |
+               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+               DEV_RX_OFFLOAD_VLAN_FILTER))) {
+               PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
+                           eth_dev->data->port_id);
+               return bnxt_recv_pkts_vec;
+       }
+       PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
+                   eth_dev->data->port_id);
+       PMD_DRV_LOG(INFO,
+                   "Port %d scatter: %d rx offload: %" PRIX64 "\n",
+                   eth_dev->data->port_id,
+                   eth_dev->data->scattered_rx,
+                   eth_dev->data->dev_conf.rxmode.offloads);
+#endif
+       return bnxt_recv_pkts;
+}
+
+static eth_tx_burst_t
+bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_ARCH_X86
+       /*
+        * Vector mode transmit can be enabled only if scatter rx is not
+        * in use and tx offloads other than VLAN insertion are not in
+        * use.
+        */
+       if (!eth_dev->data->scattered_rx &&
+           !(eth_dev->data->dev_conf.txmode.offloads &
+             ~DEV_TX_OFFLOAD_VLAN_INSERT)) {
+               PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
+                           eth_dev->data->port_id);
+               return bnxt_xmit_pkts_vec;
+       }
+       PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
+                   eth_dev->data->port_id);
+       PMD_DRV_LOG(INFO,
+                   "Port %d scatter: %d tx offload: %" PRIX64 "\n",
+                   eth_dev->data->port_id,
+                   eth_dev->data->scattered_rx,
+                   eth_dev->data->dev_conf.txmode.offloads);
+#endif
+       return bnxt_xmit_pkts;
+}
+
 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        int vlan_mask = 0;
        int rc;
@@ -642,6 +717,8 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
        if (rc)
                goto error;
 
+       eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
+
        bnxt_link_update_op(eth_dev, 1);
 
        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
@@ -652,6 +729,8 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
        if (rc)
                goto error;
 
+       eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
+       eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
        bp->flags |= BNXT_FLAG_INIT_DONE;
        return 0;
 
@@ -664,7 +743,7 @@ error:
 
 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        int rc = 0;
 
        if (!bp->link_info.link_up)
@@ -678,7 +757,7 @@ static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
 
 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
 
        eth_dev->data->dev_link.link_status = 0;
        bnxt_set_hwrm_link_config(bp, false);
@@ -690,7 +769,7 @@ static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
 /* Unload the driver, release resources */
 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
 
        bp->flags &= ~BNXT_FLAG_INIT_DONE;
        if (bp->eth_dev->data->dev_started) {
@@ -707,7 +786,7 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
 
 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
 
        if (bp->dev_stopped == 0)
                bnxt_dev_stop_op(eth_dev);
@@ -727,7 +806,7 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
                                    uint32_t index)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
@@ -763,7 +842,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
                                struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
        struct bnxt_filter_info *filter;
 
@@ -798,7 +877,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
 {
        int rc = 0;
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        struct rte_eth_link new;
        unsigned int cnt = BNXT_LINK_WAIT_CNT;
 
@@ -838,7 +917,7 @@ out:
 
 static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
 
        if (bp->vnic_info == NULL)
@@ -852,7 +931,7 @@ static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
 
 static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
 
        if (bp->vnic_info == NULL)
@@ -866,7 +945,7 @@ static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
 
 static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
 
        if (bp->vnic_info == NULL)
@@ -880,7 +959,7 @@ static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
 
 static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
 
        if (bp->vnic_info == NULL)
@@ -892,30 +971,72 @@ static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
        bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
 }
 
+/* Return the bnxt_rx_queue pointer corresponding to a given queue ID. */
+static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
+{
+       if (qid >= bp->rx_nr_rings)
+               return NULL;
+
+       return bp->eth_dev->data->rx_queues[qid];
+}
+
+/* Return the queue ID corresponding to a given RSS table ring/group ID. */
+static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
+{
+       unsigned int i;
+
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               if (bp->grp_info[i].fw_grp_id == fwr)
+                       return i;
+       }
+
+       return INVALID_HW_RING_ID;
+}
+
 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
                            struct rte_eth_rss_reta_entry64 *reta_conf,
                            uint16_t reta_size)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
-       struct bnxt_vnic_info *vnic;
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+       uint16_t tbl_size = HW_HASH_INDEX_SIZE;
+       uint16_t idx, sft;
        int i;
 
+       if (!vnic->rss_table)
+               return -EINVAL;
+
        if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
                return -EINVAL;
 
-       if (reta_size != HW_HASH_INDEX_SIZE) {
+       if (reta_size != tbl_size) {
                PMD_DRV_LOG(ERR, "The configured hash table lookup size "
                        "(%d) must equal the size supported by the hardware "
-                       "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+                       "(%d)\n", reta_size, tbl_size);
                return -EINVAL;
        }
-       /* Update the RSS VNIC(s) */
-       for (i = 0; i < bp->max_vnics; i++) {
-               vnic = &bp->vnic_info[i];
-               memcpy(vnic->rss_table, reta_conf, reta_size);
-               bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+
+       for (i = 0; i < reta_size; i++) {
+               struct bnxt_rx_queue *rxq;
+
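+               /* idx selects the RTE_RETA_GROUP_SIZE-entry reta_conf group, sft the slot within it. */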
+               idx = i / RTE_RETA_GROUP_SIZE;
+               sft = i % RTE_RETA_GROUP_SIZE;
+
+               if (!(reta_conf[idx].mask & (1ULL << sft)))
+                       continue;
+
+               rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
+               if (!rxq) {
+                       PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
+                       return -EINVAL;
+               }
+
+               vnic->rss_table[i] =
+                   vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
        }
+
+       bnxt_hwrm_vnic_rss_cfg(bp, vnic);
        return 0;
 }
 
@@ -923,10 +1044,10 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
                              struct rte_eth_rss_reta_entry64 *reta_conf,
                              uint16_t reta_size)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
-       struct rte_intr_handle *intr_handle
-               = &bp->pdev->intr_handle;
+       uint16_t tbl_size = HW_HASH_INDEX_SIZE;
+       uint16_t idx, sft, i;
 
        /* Retrieve from the default VNIC */
        if (!vnic)
@@ -934,18 +1055,28 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
        if (!vnic->rss_table)
                return -EINVAL;
 
-       if (reta_size != HW_HASH_INDEX_SIZE) {
+       if (reta_size != tbl_size) {
                PMD_DRV_LOG(ERR, "The configured hash table lookup size "
                        "(%d) must equal the size supported by the hardware "
-                       "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+                       "(%d)\n", reta_size, tbl_size);
                return -EINVAL;
        }
-       /* EW - need to revisit here copying from uint64_t to uint16_t */
-       memcpy(reta_conf, vnic->rss_table, reta_size);
 
-       if (rte_intr_allow_others(intr_handle)) {
-               if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
-                       bnxt_dev_lsc_intr_setup(eth_dev);
+       for (idx = 0, i = 0; i < reta_size; i++) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               sft = i % RTE_RETA_GROUP_SIZE;
+
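+               /* Report only the entries the caller selected via the group mask. */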
+               if (reta_conf[idx].mask & (1ULL << sft)) {
+                       uint16_t qid;
+
+                       qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
+
+                       if (qid == INVALID_HW_RING_ID) {
+                               PMD_DRV_LOG(ERR, "Invalid entry in RSS table.\n");
+                               return -EINVAL;
+                       }
+                       reta_conf[idx].reta[sft] = qid;
+               }
        }
 
        return 0;
@@ -954,7 +1085,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
                                   struct rte_eth_rss_conf *rss_conf)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic;
        uint16_t hash_type = 0;
@@ -1010,7 +1141,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
                                     struct rte_eth_rss_conf *rss_conf)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
        int len;
        uint32_t hash_types;
@@ -1068,7 +1199,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_link link_info;
        int rc;
 
@@ -1100,7 +1231,7 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
 
        if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
                PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
@@ -1156,7 +1287,7 @@ static int
 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
                         struct rte_eth_udp_tunnel *udp_tunnel)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        uint16_t tunnel_type = 0;
        int rc = 0;
 
@@ -1204,7 +1335,7 @@ static int
 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
                         struct rte_eth_udp_tunnel *udp_tunnel)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        uint16_t tunnel_type = 0;
        uint16_t port = 0;
        int rc = 0;
@@ -1404,7 +1535,7 @@ exit:
 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
                uint16_t vlan_id, int on)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
 
        /* These operations apply to ALL existing MAC/VLAN filters */
        if (on)
@@ -1416,7 +1547,7 @@ static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
 static int
 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
        unsigned int i;
 
@@ -1454,7 +1585,7 @@ static int
 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
                        struct rte_ether_addr *addr)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        /* Default Filter is tied to VNIC 0 */
        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
        struct bnxt_filter_info *filter;
@@ -1493,7 +1624,7 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
                          struct rte_ether_addr *mc_addr_set,
                          uint32_t nb_mc_addr)
 {
-       struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       struct bnxt *bp = eth_dev->data->dev_private;
        char *mc_addr_list = (char *)mc_addr_set;
        struct bnxt_vnic_info *vnic;
        uint32_t off = 0, i = 0;
@@ -1522,7 +1653,7 @@ allmulti:
 static int
 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
        uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
        uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
@@ -1578,9 +1709,13 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
 {
        struct bnxt *bp = eth_dev->data->dev_private;
        struct rte_eth_dev_info dev_info;
+       uint32_t new_pkt_size;
        uint32_t rc = 0;
        uint32_t i;
 
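+       /*
+        * Frame size implied by the new MTU: MTU plus Ethernet header, CRC,
+        * and room for BNXT_NUM_VLANS VLAN tags.
+        */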
+       new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
+                      VLAN_TAG_SIZE * BNXT_NUM_VLANS;
+
        bnxt_dev_info_get_op(eth_dev, &dev_info);
 
        if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
@@ -1589,6 +1724,23 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
                return -EINVAL;
        }
 
+#ifdef RTE_ARCH_X86
+       /*
+        * If vector-mode tx/rx is active, disallow any MTU change that would
+        * require scattered receive support.
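+        * (min_rx_buf_size is the smallest rx mbuf buffer size among the
+        * configured queues.)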
+        */
+       if (eth_dev->data->dev_started &&
+           (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec ||
+            eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) &&
+           (new_pkt_size >
+            eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+               PMD_DRV_LOG(ERR,
+                           "MTU change would require scattered rx support.\n");
+               PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
+               return -EINVAL;
+       }
+#endif
+
        if (new_mtu > RTE_ETHER_MTU) {
                bp->flags |= BNXT_FLAG_JUMBO;
                bp->eth_dev->data->dev_conf.rxmode.offloads |=
@@ -1599,9 +1751,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
                bp->flags &= ~BNXT_FLAG_JUMBO;
        }
 
-       eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
-               new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
-               VLAN_TAG_SIZE * 2;
+       eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
 
        eth_dev->data->mtu = new_mtu;
        PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
@@ -1632,7 +1782,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
 static int
 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        uint16_t vlan = bp->vlan;
        int rc;
 
@@ -1652,7 +1802,7 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
 static int
 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
 
        return bnxt_hwrm_port_led_cfg(bp, true);
 }
@@ -1660,7 +1810,7 @@ bnxt_dev_led_on_op(struct rte_eth_dev *dev)
 static int
 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
 
        return bnxt_hwrm_port_led_cfg(bp, false);
 }
@@ -1852,7 +2002,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_ethertype_filter *efilter =
                        (struct rte_eth_ethertype_filter *)arg;
        struct bnxt_filter_info *bfilter, *filter1;
@@ -2156,7 +2306,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        int ret;
 
        if (filter_op == RTE_ETH_FILTER_NOP)
@@ -2473,7 +2623,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
                 enum rte_filter_op filter_op,
                 void *arg)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
        struct bnxt_filter_info *filter, *match;
        struct bnxt_vnic_info *vnic, *mvnic;
@@ -2636,9 +2786,10 @@ bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
                RTE_PTYPE_UNKNOWN
        };
 
-       if (dev->rx_pkt_burst == bnxt_recv_pkts)
-               return ptypes;
-       return NULL;
+       if (!dev->rx_pkt_burst)
+               return NULL;
+
+       return ptypes;
 }
 
 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
@@ -2759,7 +2910,7 @@ static int
 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
 {
        uint64_t ns;
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 
        if (!ptp)
@@ -2776,7 +2927,7 @@ static int
 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
 {
        uint64_t ns, systime_cycles;
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 
        if (!ptp)
@@ -2791,7 +2942,7 @@ bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
 static int
 bnxt_timesync_enable(struct rte_eth_dev *dev)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t shift = 0;
 
@@ -2827,7 +2978,7 @@ bnxt_timesync_enable(struct rte_eth_dev *dev)
 static int
 bnxt_timesync_disable(struct rte_eth_dev *dev)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 
        if (!ptp)
@@ -2849,7 +3000,7 @@ bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                 struct timespec *timestamp,
                                 uint32_t flags __rte_unused)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint64_t rx_tstamp_cycles = 0;
        uint64_t ns;
@@ -2867,7 +3018,7 @@ static int
 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                 struct timespec *timestamp)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint64_t tx_tstamp_cycles = 0;
        uint64_t ns;
@@ -2885,7 +3036,7 @@ bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
 static int
 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 
        if (!ptp)
@@ -2899,7 +3050,7 @@ bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
 static int
 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        int rc;
        uint32_t dir_entries;
        uint32_t entry_length;
@@ -2919,7 +3070,7 @@ static int
 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
                struct rte_dev_eeprom_info *in_eeprom)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        uint32_t index;
        uint32_t offset;
 
@@ -2990,7 +3141,7 @@ static int
 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
                struct rte_dev_eeprom_info *in_eeprom)
 {
-       struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+       struct bnxt *bp = dev->data->dev_private;
        uint8_t index, dir_op;
        uint16_t type, ext, ordinal, attr;