#define BNXT_MAX_RX_RING_DESC 8192
#define BNXT_DB_SIZE 0x80
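+/*
+ * Number of completion rings reserved for async events: on ARM64
+ * (Stingray SoC) builds, Stingray devices conserve completion ring
+ * resources by sharing Rx CQ 0 for async notifications instead.
+ * Completion ring accounting thus becomes
+ * nb_rxq + nb_txq + BNXT_NUM_ASYNC_CPR(bp).
+ */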
+#ifdef RTE_ARCH_ARM64
+#define BNXT_NUM_ASYNC_CPR(bp) (BNXT_STINGRAY(bp) ? 0 : 1)
+#else
+#define BNXT_NUM_ASYNC_CPR(bp) 1
+#endif
+
/* Chimp Communication Channel */
#define GRCPF_REG_CHIMP_CHANNEL_OFFSET 0x0
#define GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_FLAG_TRUSTED_VF_EN (1 << 11)
#define BNXT_FLAG_DFLT_VNIC_SET (1 << 12)
#define BNXT_FLAG_THOR_CHIP (1 << 13)
+#define BNXT_FLAG_STINGRAY (1 << 14)
#define BNXT_FLAG_EXT_STATS_SUPPORTED (1 << 29)
#define BNXT_FLAG_NEW_RM (1 << 30)
#define BNXT_FLAG_INIT_DONE (1U << 31)
#define BNXT_USE_KONG(bp) ((bp)->flags & BNXT_FLAG_KONG_MB_EN)
#define BNXT_VF_IS_TRUSTED(bp) ((bp)->flags & BNXT_FLAG_TRUSTED_VF_EN)
#define BNXT_CHIP_THOR(bp) ((bp)->flags & BNXT_FLAG_THOR_CHIP)
+#define BNXT_STINGRAY(bp) ((bp)->flags & BNXT_FLAG_STINGRAY)
#define BNXT_HAS_NQ(bp) BNXT_CHIP_THOR(bp)
#define BNXT_HAS_RING_GRPS(bp) (!BNXT_CHIP_THOR(bp))
uint16_t fw_tx_port_stats_ext_size;
- /* Default completion ring */
- struct bnxt_cp_ring_info *def_cp_ring;
+ /* Completion ring for async events and HWRM forwarded responses */
+ struct bnxt_cp_ring_info *async_cp_ring;
uint32_t max_ring_grps;
struct bnxt_ring_grp_info *grp_info;
bnxt_free_stats(bp);
bnxt_free_tx_rings(bp);
bnxt_free_rx_rings(bp);
+ bnxt_free_async_cp_ring(bp);
}
static int bnxt_alloc_mem(struct bnxt *bp)
{
int rc;
+ rc = bnxt_alloc_async_ring_struct(bp);
+ if (rc)
+ goto alloc_mem_err;
+
rc = bnxt_alloc_vnic_mem(bp);
if (rc)
goto alloc_mem_err;
if (rc)
goto alloc_mem_err;
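+ /* Allocate the dedicated async completion ring; no-op on Stingray. */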
+ rc = bnxt_alloc_async_cp_ring(bp);
+ if (rc)
+ goto alloc_mem_err;
+
return 0;
alloc_mem_err:
/* Inherit new configurations */
if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
- eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
- bp->max_cp_rings ||
+ eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues +
+ BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
bp->max_stat_ctx)
goto resource_error;
pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF2)
bp->flags |= BNXT_FLAG_THOR_CHIP;
+ if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
+ pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
+ pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
+ pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
+ bp->flags |= BNXT_FLAG_STINGRAY;
+
rc = bnxt_init_board(eth_dev);
if (rc) {
PMD_DRV_LOG(ERR,
req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
AGG_RING_MULTIPLIER);
- req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
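+ /* Include the dedicated async completion ring in the resource request. */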
+ req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
+ bp->tx_nr_rings +
+ BNXT_NUM_ASYNC_CPR(bp));
req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
- bp->tx_nr_rings);
+ bp->tx_nr_rings +
+ BNXT_NUM_ASYNC_CPR(bp));
req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
if (bp->vf_resv_strategy ==
HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
return rc;
}
-static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
sizeof(*cpr->cp_desc_ring));
cpr->cp_raw_cons = 0;
+ cpr->valid = 0;
}
-static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
req.enables = rte_cpu_to_le_32(
HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = rte_cpu_to_le_16(
- bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+ bp->async_cp_ring->cp_ring_struct->fw_ring_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
req.enables = rte_cpu_to_le_32(
HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = rte_cpu_to_le_16(
- bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+ bp->async_cp_ring->cp_ring_struct->fw_ring_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
int bnxt_free_all_hwrm_rings(struct bnxt *bp);
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp);
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp);
+void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
+void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
void bnxt_free_all_hwrm_resources(struct bnxt *bp);
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
struct bnxt *bp = eth_dev->data->dev_private;
- struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+ struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
struct cmpl_base *cmp;
uint32_t raw_cons;
uint32_t cons;
bnxt_event_hwrm_resp_handler(bp, cmp);
raw_cons = NEXT_RAW_CMP(raw_cons);
- };
+ }
cpr->cp_raw_cons = raw_cons;
- B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
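+ /* Thor devices re-arm through the NQ doorbell; older devices
+ * re-arm the completion ring doorbell directly.
+ */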
+ if (BNXT_HAS_NQ(bp))
+ bnxt_db_nq_arm(cpr);
+ else
+ B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
}
int bnxt_free_int(struct bnxt *bp)
void bnxt_disable_int(struct bnxt *bp)
{
- struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+ struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+
+ if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+ return;
+
+ if (!cpr || !cpr->cp_db.doorbell)
+ return;
- /* Only the default completion ring */
- if (cpr != NULL && cpr->cp_db.doorbell != NULL)
+ /* The async completion ring is the only ring using interrupts. */
+ if (BNXT_HAS_NQ(bp))
+ bnxt_db_nq(cpr);
+ else
B_CP_DB_DISARM(cpr);
}
void bnxt_enable_int(struct bnxt *bp)
{
- struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+ struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+
+ if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+ return;
+
+ if (!cpr || !cpr->cp_db.doorbell)
+ return;
- /* Only the default completion ring */
- if (cpr != NULL && cpr->cp_db.doorbell != NULL)
+ /* The async completion ring is the only ring using interrupts. */
+ if (BNXT_HAS_NQ(bp))
+ bnxt_db_nq_arm(cpr);
+ else
B_CP_DB_ARM(cpr);
}
{
uint16_t total_vecs;
const int len = sizeof(bp->irq_tbl[0].name);
- int i, rc = 0;
+ int i;
/* DPDK host only supports 1 MSI-X vector */
total_vecs = 1;
bp->irq_tbl[i].handler = bnxt_int_handler;
}
} else {
- rc = -ENOMEM;
- goto setup_exit;
+ PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
+ return -ENOMEM;
}
- return 0;
-setup_exit:
- PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
- return rc;
+ return 0;
}
int bnxt_request_int(struct bnxt *bp)
#include <rte_bitmap.h>
#include <rte_memzone.h>
+#include <rte_malloc.h>
#include <unistd.h>
#include "bnxt.h"
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
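+ /* Ring index 0 is reserved for the dedicated async ring when present. */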
+ int cp_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
uint8_t ring_type;
int rc = 0;
}
}
- rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, queue_index,
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
HWRM_NA_SIGNATURE, nq_ring_id);
if (rc)
return rc;
cpr->cp_cons = 0;
- bnxt_set_db(bp, &cpr->cp_db, ring_type, queue_index,
+ bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
cp_ring->fw_ring_id);
bnxt_db_cq(cpr);
struct bnxt_cp_ring_info *nqr)
{
struct bnxt_ring *nq_ring = nqr->cp_ring_struct;
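+ /* NQ 0 is reserved for the dedicated async ring when present. */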
+ int nq_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
uint8_t ring_type;
int rc = 0;
ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
- rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, queue_index,
+ rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, nq_ring_index,
HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
if (rc)
return rc;
- bnxt_set_db(bp, &nqr->cp_db, ring_type, queue_index,
+ bnxt_set_db(bp, &nqr->cp_db, ring_type, nq_ring_index,
nq_ring->fw_ring_id);
bnxt_db_nq(nqr);
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
- int rc = 0;
+ int rc;
if (BNXT_HAS_NQ(bp)) {
- if (bnxt_alloc_nq_ring(bp, queue_index, nqr))
+ rc = bnxt_alloc_nq_ring(bp, queue_index, nqr);
+ if (rc)
goto err_out;
}
- if (bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr))
+ rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr);
+ if (rc)
goto err_out;
if (BNXT_HAS_RING_GRPS(bp)) {
bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
}
- if (!queue_index) {
+ if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
/*
- * In order to save completion resources, use the first
- * completion ring from PF or VF as the default completion ring
- * for async event and HWRM forward response handling.
+ * If a dedicated async event completion ring is not enabled,
+ * use the first completion ring from PF or VF as the default
+ * completion ring for async event handling.
*/
- bp->def_cp_ring = cpr;
+ bp->async_cp_ring = cpr;
rc = bnxt_hwrm_set_async_event_cr(bp);
if (rc)
goto err_out;
}
- if (bnxt_alloc_rx_ring(bp, queue_index))
+ rc = bnxt_alloc_rx_ring(bp, queue_index);
+ if (rc)
goto err_out;
- if (bnxt_alloc_rx_agg_ring(bp, queue_index))
+ rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
+ if (rc)
goto err_out;
rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
bnxt_db_write(&rxr->ag_db, rxr->ag_prod);
}
rxq->index = queue_index;
- PMD_DRV_LOG(INFO,
- "queue %d, rx_deferred_start %d, state %d!\n",
- queue_index, rxq->rx_deferred_start,
- bp->eth_dev->data->rx_queue_state[queue_index]);
+
+ return 0;
err_out:
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate receive queue %d, rc %d.\n",
+ queue_index, rc);
return rc;
}
}
bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
-
- if (!i) {
+ if (!BNXT_NUM_ASYNC_CPR(bp) && !i) {
/*
- * In order to save completion resource, use the first
- * completion ring from PF or VF as the default
- * completion ring for async event & HWRM
- * forward response handling.
+ * If a dedicated async event completion ring is not
+ * enabled, use the first completion ring as the default
+ * completion ring for async event handling.
*/
- bp->def_cp_ring = cpr;
+ bp->async_cp_ring = cpr;
rc = bnxt_hwrm_set_async_event_cr(bp);
if (rc)
goto err_out;
err_out:
return rc;
}
+
+/* Allocate dedicated async completion ring. */
+int bnxt_alloc_async_cp_ring(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ uint8_t ring_type;
+ int rc;
+
+ if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+ return 0;
+
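+ /* Thor-based devices use an NQ for async notifications; older chips
+ * use a legacy completion ring.
+ */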
+ if (BNXT_HAS_NQ(bp))
+ ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
+ else
+ ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;
+
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
+ HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
+ if (rc)
+ return rc;
+
+ cpr->cp_cons = 0;
+ cpr->valid = 0;
+ bnxt_set_db(bp, &cpr->cp_db, ring_type, 0, cp_ring->fw_ring_id);
+
+ if (BNXT_HAS_NQ(bp))
+ bnxt_db_nq(cpr);
+ else
+ bnxt_db_cq(cpr);
+
+ return bnxt_hwrm_set_async_event_cr(bp);
+}
+
+/* Free dedicated async completion ring. */
+void bnxt_free_async_cp_ring(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+
+ if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
+ return;
+
+ if (BNXT_HAS_NQ(bp))
+ bnxt_free_nq_ring(bp, cpr);
+ else
+ bnxt_free_cp_ring(bp, cpr);
+
+ bnxt_free_ring(cpr->cp_ring_struct);
+ rte_free(cpr->cp_ring_struct);
+ cpr->cp_ring_struct = NULL;
+ rte_free(cpr);
+ bp->async_cp_ring = NULL;
+}
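+
+/*
+ * Lifecycle (as wired up in this patch): bnxt_alloc_async_ring_struct()
+ * runs first from bnxt_alloc_mem(), bnxt_alloc_async_cp_ring() allocates
+ * the firmware ring later in bnxt_alloc_mem(), and bnxt_free_mem()
+ * releases both through bnxt_free_async_cp_ring().
+ */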
+
+int bnxt_alloc_async_ring_struct(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = NULL;
+ struct bnxt_ring *ring = NULL;
+ unsigned int socket_id;
+
+ if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+ return 0;
+
+ socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
+
+ cpr = rte_zmalloc_socket("cpr",
+ sizeof(struct bnxt_cp_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cpr == NULL)
+ return -ENOMEM;
+
+ ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL) {
+ rte_free(cpr);
+ return -ENOMEM;
+ }
+
+ ring->bd = (void *)cpr->cp_desc_ring;
+ ring->bd_dma = cpr->cp_desc_mapping;
+ ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
+ ring->ring_mask = ring->ring_size - 1;
+ ring->vmem_size = 0;
+ ring->vmem = NULL;
+
+ bp->async_cp_ring = cpr;
+ cpr->cp_ring_struct = ring;
+
+ return bnxt_alloc_rings(bp, 0, NULL, NULL,
+ bp->async_cp_ring, NULL,
+ "def_cp");
+}
const char *suffix);
int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index);
int bnxt_alloc_hwrm_rings(struct bnxt *bp);
+int bnxt_alloc_async_cp_ring(struct bnxt *bp);
+void bnxt_free_async_cp_ring(struct bnxt *bp);
+int bnxt_alloc_async_ring_struct(struct bnxt *bp);
static inline void bnxt_db_write(struct bnxt_db_info *db, uint32_t idx)
{
return -EINVAL;
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
-
bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
- bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
+ rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
+ if (rc)
+ return rc;
+
PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
rc = bnxt_vnic_rss_configure(bp, vnic);
}
- if (rc == 0)
+ if (rc == 0) {
+ dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
rxq->rx_deferred_start = false;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "queue %d, rx_deferred_start %d, state %d!\n",
+ rx_queue_id, rxq->rx_deferred_start,
+ bp->eth_dev->data->rx_queue_state[rx_queue_id]);
return rc;
}
struct bnxt_rx_queue *rxq = NULL;
int rc = 0;
- /* Rx CQ 0 also works as Default CQ for async notifications */
- if (!rx_queue_id) {
+ /* On the Stingray platform and other platforms needing tighter
+ * control of resource utilization, Rx CQ 0 also serves as the
+ * default CQ for async notifications and cannot be stopped.
+ */
+ if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
return -EINVAL;
}
nb_rx_pkts++;
if (rc == -EBUSY) /* partial completion */
break;
- } else {
+ } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
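+ /* Without a dedicated async ring, async events and forwarded
+ * HWRM responses arrive on Rx CQ 0; handle them inline.
+ */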
evt =
bnxt_event_hwrm_resp_handler(rxq->bp,
(struct cmpl_base *)rxcmp);
mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
rx_pkts[nb_rx_pkts++] = mbuf;
- } else {
+ } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
evt =
bnxt_event_hwrm_resp_handler(rxq->bp,
(struct cmpl_base *)rxcmp);