dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
return rc;
}
-static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
int rc;
struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
cpr = rxq->cp_ring;
}
- rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
-
- if (rc)
- return rc;
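+ /* Allocate a stats context only if one has not been allocated yet. */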
+ if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE) {
+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
+ if (rc)
+ return rc;
+ }
}
return rc;
}
bp->grp_info[queue_index].ag_fw_ring_id =
INVALID_HW_RING_ID;
}
+
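+ /* Free the stats context so a later queue (re)start can allocate a fresh one. */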
+ if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
+ bnxt_hwrm_stat_ctx_free(bp, cpr);
+ cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+ }
+
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
bnxt_free_cp_ring(bp, cpr);
bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
struct bnxt_rx_queue **rxqs = bp->rx_queues;
uint16_t *ring_tbl = vnic->rss_table;
/* Find next active ring. */
for (cnt = 0; cnt < max_rings; cnt++) {
- if (rx_queue_state[k] !=
- RTE_ETH_QUEUE_STATE_STOPPED)
+ if (rxqs[k]->rx_started)
break;
if (++k == max_rings)
k = 0;
return rc;
}
+
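+/*
+ * Free the firmware-side TX ring, stats context and completion ring
+ * associated with a TX queue; the inverse of bnxt_alloc_hwrm_tx_ring().
+ */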
+void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index)
+{
+ struct bnxt_tx_queue *txq = bp->tx_queues[queue_index];
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+
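+ /* Free the TX ring before its completion ring; the free request carries the completion ring id. */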
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_TX,
+ cpr->cp_ring_struct->fw_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+
+ if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
+ bnxt_hwrm_stat_ctx_free(bp, cpr);
+ cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+ }
+
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_free_cp_ring(bp, cpr);
+ cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ }
+}
int bnxt_hwrm_read_sfp_module_eeprom_info(struct bnxt *bp, uint16_t i2c_addr,
uint16_t page_number, uint16_t start_addr,
uint16_t data_length, uint8_t *buf);
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
+void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index);
+int bnxt_alloc_hwrm_tx_ring(struct bnxt *bp, int queue_index);
#endif
if (rc)
goto err_out;
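+ /* Allocate a stats context for this queue's completion ring. */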
+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
+ if (rc)
+ goto err_out;
+
if (BNXT_HAS_RING_GRPS(bp)) {
bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
return bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
NULL, bp->async_cp_ring, NULL, "def_cp");
}
+
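+/*
+ * Allocate the completion ring, stats context and TX ring for a TX
+ * queue, then set up its doorbell.
+ */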
+int bnxt_alloc_hwrm_tx_ring(struct bnxt *bp, int queue_index)
+{
+ struct bnxt_tx_queue *txq = bp->tx_queues[queue_index];
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ unsigned int idx = queue_index + bp->rx_cp_nr_rings;
+ uint16_t tx_cosq_id = 0;
+ struct bnxt_coal coal;
+ int rc = 0;
+
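+ /* The completion ring must be allocated before the TX ring that reports into it. */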
+ rc = bnxt_alloc_cmpl_ring(bp, idx, cpr);
+ if (rc)
+ goto err_out;
+
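+ /* Program default interrupt coalescing on the new completion ring. */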
+ bnxt_init_dflt_coal(&coal);
+ bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
+
+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
+ if (rc)
+ goto err_out;
+
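+ /* Select a CoS queue id: per-TX-queue when CoS classification is available, otherwise the default. */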
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
+ tx_cosq_id = bp->tx_cosq_id[queue_index < bp->max_lltc ? queue_index : 0];
+ else
+ tx_cosq_id = bp->tx_cosq_id[0];
+
+ rc = bnxt_hwrm_ring_alloc(bp, ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
+ queue_index, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id,
+ tx_cosq_id);
+ if (rc)
+ goto err_out;
+
+ bnxt_set_db(bp, &txr->tx_db, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
+ queue_index, ring->fw_ring_id,
+ ring->ring_mask);
+ txq->index = idx;
+
+ return rc;
+err_out:
+ bnxt_free_hwrm_tx_ring(bp, queue_index);
+ return rc;
+}
if (is_bnxt_in_error(rxq->bp))
return;
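+ /* Return the HW ring resources to firmware before freeing the rx buffers. */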
+ bnxt_free_hwrm_rx_ring(rxq->bp, rxq->queue_id);
bnxt_rx_queue_release_mbufs(rxq);
/* Free RX ring hardware descriptors */
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
struct bnxt_rx_queue *rxq;
int rc = 0;
- uint8_t queue_state;
rc = is_bnxt_in_error(bp);
if (rc)
else
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
- if (rxq->rx_deferred_start) {
- queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
- rxq->rx_started = false;
- } else {
- queue_state = RTE_ETH_QUEUE_STATE_STARTED;
- rxq->rx_started = true;
- }
- eth_dev->data->rx_queue_state[queue_idx] = queue_state;
+ rxq->rx_started = !rxq->rx_deferred_start;
+ rxq->vnic = BNXT_GET_DEFAULT_VNIC(bp);
/* Configure mtu if it is different from what was configured before */
if (!queue_idx)
#include <rte_malloc.h>
#include "bnxt.h"
+#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
return;
/* Free TX ring hardware descriptors */
+ bnxt_free_hwrm_tx_ring(txq->bp, txq->queue_id);
bnxt_tx_queue_release_mbufs(txq);
if (txq->tx_ring) {
bnxt_free_ring(txq->tx_ring->tx_ring_struct);
#include <rte_malloc.h>
#include "bnxt.h"
+#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
if (rc)
return rc;
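+ /* Free any previously allocated HW rings, then allocate fresh ones for this queue. */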
+ bnxt_free_hwrm_tx_ring(bp, tx_queue_id);
+ rc = bnxt_alloc_hwrm_tx_ring(bp, tx_queue_id);
+ if (rc)
+ return rc;
+
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
txq->tx_started = true;
PMD_DRV_LOG(DEBUG, "Tx queue started\n");