	if (BNXT_CHIP_P5(bp))
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
-	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
-	if (rc) {
-		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
-		goto err_out;
-	}
-
	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}
+	for (j = 0; j < bp->rx_nr_rings; j++) {
+		struct bnxt_rx_queue *rxq = bp->rx_queues[j];
+
+		if (!rxq->rx_deferred_start) {
+			bp->eth_dev->data->rx_queue_state[j] =
+				RTE_ETH_QUEUE_STATE_STARTED;
+			rxq->rx_started = true;
+		}
+	}
+
	/* default vnic 0 */
	rc = bnxt_setup_one_vnic(bp, 0);
	if (rc)
		goto err_out;
		}
	}
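/*
 * For context: this patch moves the Rx queue state loop (added above,
 * removed from its old location below) ahead of the default-VNIC setup.
 * The loop records, per ring, whether the queue should start right away
 * or stay stopped because the application asked for a deferred start via
 * rte_eth_rxconf.  A minimal, hypothetical sketch of that standard ethdev
 * usage (not part of this patch; port/queue ids, descriptor count and
 * mempool are placeholders):
 */
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
setup_deferred_rx_queue(uint16_t port_id, uint16_t queue_id,
			struct rte_mempool *mp)
{
	struct rte_eth_rxconf rxconf = {
		.rx_deferred_start = 1,	/* skip this queue in rte_eth_dev_start() */
	};
	int rc;

	rc = rte_eth_rx_queue_setup(port_id, queue_id, 512,
				    rte_eth_dev_socket_id(port_id),
				    &rxconf, mp);
	if (rc != 0)
		return rc;

	rc = rte_eth_dev_start(port_id);	/* queue stays RTE_ETH_QUEUE_STATE_STOPPED */
	if (rc != 0)
		return rc;

	/* Bring the ring up explicitly once the application is ready. */
	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}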
-	for (j = 0; j < bp->rx_nr_rings; j++) {
-		struct bnxt_rx_queue *rxq = bp->rx_queues[j];
-
-		if (!rxq->rx_deferred_start) {
-			bp->eth_dev->data->rx_queue_state[j] =
-				RTE_ETH_QUEUE_STATE_STARTED;
-			rxq->rx_started = true;
-		}
-	}
-
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR,
	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
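/*
 * For context: clearing RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP tells applications
 * that rte_flow rules are not preserved across a port stop/start cycle and
 * must be re-created after a restart.  A minimal, hypothetical application
 * side check (standard ethdev API, not part of this patch):
 */
#include <rte_ethdev.h>

static int
flow_rules_survive_restart(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	int rc;

	rc = rte_eth_dev_info_get(port_id, &dev_info);
	if (rc != 0)
		return rc;

	/* Non-zero when the device keeps rte_flow rules across stop/start. */
	return (dev_info.dev_capa & RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP) != 0;
}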
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
	int ret;
	eth_dev->data->dev_started = 0;
-	eth_dev->data->scattered_rx = 0;
	/* Prevent crashes when queues are still in use */
	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
	if (BNXT_FLOW_XSTATS_EN(bp))
		bp->flow_stat->flow_count = 0;
+	eth_dev->data->scattered_rx = 0;
+
	return 0;
}
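/*
 * For context: the stop path above points rx_pkt_burst at
 * bnxt_dummy_recv_pkts so a datapath thread that keeps polling the port
 * cannot touch rings that are being torn down.  Presumably such a handler
 * simply reports an empty poll; a minimal, hypothetical sketch with the
 * eth_rx_burst_t-style signature (not the driver's actual code):
 */
#include <stdint.h>

#include <rte_common.h>
#include <rte_mbuf.h>

static uint16_t
dummy_recv_pkts(void *rx_queue __rte_unused,
		struct rte_mbuf **rx_pkts __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	/* Nothing received; the caller simply sees zero packets. */
	return 0;
}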