#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
+#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
break;
case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
req.ring_type = ring_type;
- req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_POLL;
+ /*
+ * TODO: Some HWRM versions crash with
+ * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
+ */
+ req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
req.length = rte_cpu_to_le_32(ring->ring_size);
break;
default:
return rc;
}
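+/*
+ * Free a completion ring with the firmware and reset the host-side copy:
+ * the descriptor ring is zeroed, the fw ring id is dropped from the ring
+ * group and the raw consumer index is rewound.
+ */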
+static void bnxt_free_cp_ring(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr, unsigned int idx)
+{
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+
+ bnxt_hwrm_ring_free(bp, cp_ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
+ cp_ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
+ memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
+ sizeof(*cpr->cp_desc_ring));
+ cpr->cp_raw_cons = 0;
+}
+
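+/*
+ * Free every firmware ring owned by the port: TX rings and their
+ * completion rings, RX rings and their completion rings, then the default
+ * completion ring, zeroing the host descriptor/buffer rings as we go.
+ */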
+int bnxt_free_all_hwrm_rings(struct bnxt *bp)
+{
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
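+ /* TX ring group slots come after the default (0) and RX slots */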
+ unsigned int idx = bp->rx_cp_nr_rings + i + 1;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_TX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ memset(txr->tx_desc_ring, 0,
+ txr->tx_ring_struct->ring_size *
+ sizeof(*txr->tx_desc_ring));
+ memset(txr->tx_buf_ring, 0,
+ txr->tx_ring_struct->ring_size *
+ sizeof(*txr->tx_buf_ring));
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+ }
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ bnxt_free_cp_ring(bp, cpr, idx);
+ }
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->rx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
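+ /* Ring group 0 is reserved for the default completion ring */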
+ unsigned int idx = i + 1;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+ memset(rxr->rx_desc_ring, 0,
+ rxr->rx_ring_struct->ring_size *
+ sizeof(*rxr->rx_desc_ring));
+ memset(rxr->rx_buf_ring, 0,
+ rxr->rx_ring_struct->ring_size *
+ sizeof(*rxr->rx_buf_ring));
+ rxr->rx_prod = 0;
+ }
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ bnxt_free_cp_ring(bp, cpr, idx);
+ }
+
+ /* Default completion ring */
+ {
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ bnxt_free_cp_ring(bp, cpr, 0);
+ }
+
+ return rc;
+}
+
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
uint16_t i;
return rc;
}
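+/*
+ * Release all HWRM resources held by the port: clear the RX mask and the
+ * per-VNIC filters, free the VNIC contexts and VNICs, then the rings,
+ * ring groups and stats contexts.
+ */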
+void bnxt_free_all_hwrm_resources(struct bnxt *bp)
+{
+ unsigned int i;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ bnxt_hwrm_cfa_l2_clear_rx_mask(bp, &bp->vnic_info[0]);
+
+ /* VNIC resources */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ bnxt_clear_hwrm_vnic_filters(bp, vnic);
+
+ bnxt_hwrm_vnic_ctx_free(bp, vnic);
+ bnxt_hwrm_vnic_free(bp, vnic);
+ }
+ /* Ring resources */
+ bnxt_free_all_hwrm_rings(bp);
+ bnxt_free_all_hwrm_ring_grps(bp);
+ bnxt_free_all_hwrm_stat_ctxs(bp);
+}
+
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp);
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp);
int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp);
+int bnxt_free_all_hwrm_rings(struct bnxt *bp);
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp);
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp);
int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
#include "bnxt.h"
#include "bnxt_cpr.h"
+#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
+#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
+#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "hsi_struct_def_dpdk.h"
rte_memzone_free((const struct rte_memzone *)ring->mem_zone);
}
+/*
+ * Ring groups
+ */
+
+void bnxt_init_ring_grps(struct bnxt *bp)
+{
+ unsigned int i;
+
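+ /*
+ * HWRM_NA_SIGNATURE truncated to a byte is all ones, so this memset
+ * marks every fw ring and stats context id in each group as
+ * unconfigured.
+ */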
+ for (i = 0; i < bp->max_ring_grps; i++)
+ memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
+ sizeof(struct bnxt_ring_grp_info));
+}
+
/*
* Allocates a completion ring with vmem and stats optionally also allocating
* a TX and/or RX ring. Passing NULL as tx_ring_info and/or rx_ring_info
cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
return 0;
}
+
+/* ring_grp usage:
+ * [0] = default completion ring
+ * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
+ * [1+rx_cp_nr_rings -> +tx_cp_nr_rings] = tx_cp, tx rings
+ */
+int bnxt_alloc_hwrm_rings(struct bnxt *bp)
+{
+ unsigned int i;
+ int rc = 0;
+
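+ /*
+ * Doorbells sit in PCI BAR 2; each ring group index gets its own
+ * 0x80-byte doorbell window (hence the idx * 0x80 offsets below).
+ */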
+ /* Default completion ring */
+ {
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
+ 0, HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+ cpr->cp_doorbell =
+ (char *)bp->eth_dev->pci_dev->mem_resource[2].addr;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
+ }
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->rx_ring_struct;
+ unsigned int idx = i + 1;
+
+ /* Rx cmpl */
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
+ idx, HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+ cpr->cp_doorbell =
+ (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ idx * 0x80;
+ bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
+ /* Rx ring */
+ rc = bnxt_hwrm_ring_alloc(bp, ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ idx, cpr->hw_stats_ctx_id);
+ if (rc)
+ goto err_out;
+ rxr->rx_prod = 0;
+ rxr->rx_doorbell =
+ (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ idx * 0x80;
+ bp->grp_info[idx].rx_fw_ring_id = ring->fw_ring_id;
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ if (bnxt_init_one_rx_ring(rxq)) {
+ RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!\n");
+ bnxt_rx_queue_release_op(rxq);
+ return -ENOMEM;
+ }
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ }
+
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ unsigned int idx = 1 + bp->rx_cp_nr_rings + i;
+
+ /* Tx cmpl */
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
+ idx, HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+
+ cpr->cp_doorbell =
+ (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ idx * 0x80;
+ bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
+ /* Tx ring */
+ rc = bnxt_hwrm_ring_alloc(bp, ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
+ idx, cpr->hw_stats_ctx_id);
+ if (rc)
+ goto err_out;
+
+ txr->tx_doorbell =
+ (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ idx * 0x80;
+ }
+
+err_out:
+ return rc;
+}
struct bnxt_rx_ring_info;
struct bnxt_cp_ring_info;
void bnxt_free_ring(struct bnxt_ring *ring);
+void bnxt_init_ring_grps(struct bnxt *bp);
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
struct bnxt_tx_ring_info *tx_ring_info,
struct bnxt_rx_ring_info *rx_ring_info,
struct bnxt_cp_ring_info *cp_ring_info,
const char *suffix);
+int bnxt_alloc_hwrm_rings(struct bnxt *bp);
#endif