struct bnxt_error_recovery_info *recovery_info;
};
+int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
int is_bnxt_in_error(struct bnxt *bp);
static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
-static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
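
Dropping the static qualifier and exporting bnxt_mtu_set_op() through the header lets the configure path apply an MTU derived from the requested max Rx frame size. A minimal sketch of the presumed call site, assuming it lands in bnxt_dev_configure_op()'s jumbo-frame branch (the branch itself is not part of this hunk; error handling abbreviated):

	/* Sketch only: derive the MTU from the requested max Rx packet
	 * length and push it through the now-exported MTU handler.
	 */
	if (eth_dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_JUMBO_FRAME) {
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN -
			VLAN_TAG_SIZE * BNXT_NUM_VLANS;
		rc = bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
		if (rc)
			return rc;
	}
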
int vlan_mask = 0;
int rc;
+ if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
+ return -EINVAL;
+ }
+
if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
PMD_DRV_LOG(ERR,
"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
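
The added guard (in what appears to be bnxt_dev_start_op(), judging from the vlan_mask context) rejects a start request issued before any Rx/Tx queue has been set up, returning -EINVAL instead of operating on unconfigured queues later. A hypothetical application-side sequence showing the ordering the guard enforces (port_id, descriptor counts, and mb_pool are placeholders; return-value checks elided for brevity):

	struct rte_eth_conf conf = { 0 };

	rte_eth_dev_configure(port_id, 1, 1, &conf);
	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mb_pool);
	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
	rte_eth_dev_start(port_id);	/* would now fail with -EINVAL if the
					 * queue setup calls were skipped */
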
-static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
struct bnxt *bp = eth_dev->data->dev_private;
uint32_t new_pkt_size;
if (rc)
return rc;
+ /* Exit if receive queues are not configured yet */
+ if (!eth_dev->data->nb_rx_queues)
+ return rc;
+
new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
VLAN_TAG_SIZE * BNXT_NUM_VLANS;
bp->flags &= ~BNXT_FLAG_JUMBO;
}
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
+ /* Return early if there is no change in the MTU setting */
+ if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
+ return rc;
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
}
}
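+ /* Commit the new frame size only if every VNIC MTU update succeeded */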
+ if (!rc)
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
+
PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
return rc;
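
For reference, a worked example of the frame-size arithmetic above for a standard 1500-byte MTU, assuming the usual definitions RTE_ETHER_HDR_LEN = 14, RTE_ETHER_CRC_LEN = 4, and the bnxt values VLAN_TAG_SIZE = 4 and BNXT_NUM_VLANS = 2:

	/* new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
	 *              + VLAN_TAG_SIZE * BNXT_NUM_VLANS
	 *              = 1500 + 14 + 4 + 4 * 2
	 *              = 1526 bytes on the wire
	 */

Because the max_rx_pkt_len update is now gated on rc, a failed per-VNIC MTU update leaves the previously recorded frame size intact, keeping the software state consistent with the hardware.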