#include <rte_malloc.h>
#include "bnxt.h"
-#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
int start_grp_id, end_grp_id = 1, rc = 0;
struct bnxt_vnic_info *vnic;
struct bnxt_filter_info *filter;
- enum rte_eth_nb_pools pools = bp->rx_cp_nr_rings, max_pools = 0;
+ enum rte_eth_nb_pools pools = 1, max_pools = 0;
struct bnxt_rx_queue *rxq;
bp->nr_vnics = 0;
rc = -ENOMEM;
goto err_out;
}
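+ /* Use MAC index 0 and match on the outermost L2 header. */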
+ filter->mac_index = 0;
+ filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
goto out;
}
switch (dev_conf->rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_RSS:
case ETH_MQ_RX_VMDQ_ONLY:
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
/* FALLTHROUGH */
/* ETH_8/64_POOLs */
pools = conf->nb_queue_pools;
pools = max_pools;
break;
case ETH_MQ_RX_RSS:
- pools = 1;
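+ /* One pool per configured CoS queue, if any; otherwise a single RSS pool. */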
+ pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
break;
default:
PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
rc = -EINVAL;
goto err_out;
}
+ } else if (!dev_conf->rxmode.mq_mode) {
+ pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : pools;
}
+
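+ /* Never use more pools than there are Rx completion rings. */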
+ pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
nb_q_per_grp = bp->rx_cp_nr_rings / pools;
+ bp->rx_num_qs_per_vnic = nb_q_per_grp;
PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
pools, nb_q_per_grp);
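
The pool count computed above is driven by the application-supplied rte_eth_conf: mq_mode selects the branch, nb_queue_pools sizes the VMDq case, and rx_cosq_cnt overrides the single-pool RSS default when CoS queues are configured. For reference, a minimal application-side RSS configuration that exercises the ETH_MQ_RX_RSS branch might look like the following (generic rte_ethdev usage, not part of this patch; port_id, nb_rxq and nb_txq are assumed to come from the usual EAL/port init):

	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,	/* let the PMD use its default key */
				.rss_hf = ETH_RSS_IPV4 |
					  ETH_RSS_NONFRAG_IPV4_TCP |
					  ETH_RSS_NONFRAG_IPV4_UDP,
			},
		},
	};

	/* nb_rxq Rx queues are then distributed across the pools/VNICs above. */
	if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf) != 0)
		return -1;
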
start_grp_id = 0;
rc = -ENOMEM;
goto err_out;
}
+ filter->mac_index = 0;
+ filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
/*
* TODO: Configure & associate CFA rule for
* each VNIC for each VMDq with MACVLAN, MACVLAN+TC
out:
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
- uint16_t hash_type = 0;
if (bp->flags & BNXT_FLAG_UPDATE_HASH) {
rss = &bp->rss_conf;
bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
}
- if (rss->rss_hf & ETH_RSS_IPV4)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
- if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
- if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
- if (rss->rss_hf & ETH_RSS_IPV6)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
- if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
- if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
-
for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
- vnic->hash_type = hash_type;
+ vnic->hash_type =
+ bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
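
bnxt_rte_to_hwrm_hash_types() centralizes the rte_ethdev-to-HWRM hash-type translation that was previously open-coded here (the removed lines above). A rough sketch of such a helper, assuming it covers exactly the hash types the removed code handled and returns the HWRM hash-type bit mask (the actual signature and definition are not shown in this patch):

	static uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
	{
		uint16_t hwrm_type = 0;

		if (rte_type & ETH_RSS_IPV4)
			hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
			hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
			hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		if (rte_type & ETH_RSS_IPV6)
			hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
			hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
			hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

		return hwrm_type;
	}
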
/*
* Use the supplied key if the key length is
struct bnxt_tpa_info *tpa_info;
uint16_t i;
+ if (!rxq)
+ return;
+
rte_spinlock_lock(&rxq->lock);
- if (rxq) {
- sw_ring = rxq->rx_ring->rx_buf_ring;
- if (sw_ring) {
- for (i = 0;
- i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
- if (sw_ring[i].mbuf) {
- rte_pktmbuf_free_seg(sw_ring[i].mbuf);
- sw_ring[i].mbuf = NULL;
- }
+ sw_ring = rxq->rx_ring->rx_buf_ring;
+ if (sw_ring) {
+ for (i = 0;
+ i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
+ if (sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(sw_ring[i].mbuf);
+ sw_ring[i].mbuf = NULL;
}
}
- /* Free up mbufs in Agg ring */
- sw_ring = rxq->rx_ring->ag_buf_ring;
- if (sw_ring) {
- for (i = 0;
- i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
- if (sw_ring[i].mbuf) {
- rte_pktmbuf_free_seg(sw_ring[i].mbuf);
- sw_ring[i].mbuf = NULL;
- }
+ }
+ /* Free up mbufs in Agg ring */
+ sw_ring = rxq->rx_ring->ag_buf_ring;
+ if (sw_ring) {
+ for (i = 0;
+ i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
+ if (sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(sw_ring[i].mbuf);
+ sw_ring[i].mbuf = NULL;
}
}
+ }
+
+ /* Free up mbufs in TPA */
+ tpa_info = rxq->rx_ring->tpa_info;
+ if (tpa_info) {
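+ /* The number of TPA aggregation slots is device-specific, so size
+ * the loop from the macro rather than the old fixed BNXT_TPA_MAX.
+ */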
+ int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);
- /* Free up mbufs in TPA */
- tpa_info = rxq->rx_ring->tpa_info;
- if (tpa_info) {
- for (i = 0; i < BNXT_TPA_MAX; i++) {
- if (tpa_info[i].mbuf) {
- rte_pktmbuf_free_seg(tpa_info[i].mbuf);
- tpa_info[i].mbuf = NULL;
- }
+ for (i = 0; i < max_aggs; i++) {
+ if (tpa_info[i].mbuf) {
+ rte_pktmbuf_free_seg(tpa_info[i].mbuf);
+ tpa_info[i].mbuf = NULL;
}
}
}
struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
if (rxq) {
+ if (is_bnxt_in_error(rxq->bp))
+ return;
+
bnxt_rx_queue_release_mbufs(rxq);
/* Free RX ring hardware descriptors */
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt *bp = eth_dev->data->dev_private;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
struct bnxt_rx_queue *rxq;
int rc = 0;
uint8_t queue_state;
- if (queue_idx >= bp->max_rx_rings) {
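+ /* Do not set up a new ring while the device is in an error state. */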
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
+ if (queue_idx >= BNXT_MAX_RINGS(bp)) {
PMD_DRV_LOG(ERR,
"Cannot create Rx ring %d. Only %d rings available\n",
queue_idx, bp->max_rx_rings);
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
- PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_use_size);
PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc)
goto out;
+ PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
eth_dev->data->rx_queues[queue_idx] = rxq;
/* Allocate RX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
- "rxr")) {
+ if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL,
+ "rxr")) {
PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
}
rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
- rxq->rx_deferred_start = rx_conf->rx_deferred_start;
- queue_state = rxq->rx_deferred_start ? RTE_ETH_QUEUE_STATE_STOPPED :
- RTE_ETH_QUEUE_STATE_STARTED;
+ /* rxq 0 must not be stopped when used as async CPR */
+ if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
+ rxq->rx_deferred_start = false;
+ else
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+
+ if (rxq->rx_deferred_start) {
+ queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_started = false;
+ } else {
+ queue_state = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->rx_started = true;
+ }
eth_dev->data->rx_queue_state[queue_idx] = queue_state;
rte_spinlock_init(&rxq->lock);
-#ifdef RTE_ARCH_X86
- bnxt_rxq_vec_setup(rxq);
-#endif
+ /* Configure mtu if it is different from what was configured before */
+ if (!queue_idx)
+ bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
out:
return rc;
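
For context, the rx_started and deferred-start handling above is driven by the generic rte_ethdev queue start/stop API. A sketch of the application side (standard DPDK calls, not specific to this driver; port_id, queue_id, nb_desc, socket_id, mb_pool and dev_info are assumed to be set up elsewhere):

	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;

	rxconf.rx_deferred_start = 1;	/* keep the queue stopped across dev_start */
	rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket_id, &rxconf, mb_pool);
	rte_eth_dev_start(port_id);

	/* Later, when traffic should flow on this queue: */
	rte_eth_dev_rx_queue_start(port_id, queue_id);

Per the change above, queue 0 ignores rx_deferred_start when it doubles as the async completion ring, since that ring must stay active.
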
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_rx_queue *rxq;
struct bnxt_cp_ring_info *cpr;
int rc = 0;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (eth_dev->data->rx_queues) {
rxq = eth_dev->data->rx_queues[queue_id];
- if (!rxq) {
- rc = -EINVAL;
- return rc;
- }
+ if (!rxq)
+ return -EINVAL;
+
cpr = rxq->cp_ring;
- B_CP_DB_ARM(cpr);
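+ /* Re-arm the completion ring doorbell at the current raw consumer index. */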
+ B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
}
return rc;
}
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_rx_queue *rxq;
struct bnxt_cp_ring_info *cpr;
int rc = 0;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (eth_dev->data->rx_queues) {
rxq = eth_dev->data->rx_queues[queue_id];
- if (!rxq) {
- rc = -EINVAL;
- return rc;
- }
+ if (!rxq)
+ return -EINVAL;
+
cpr = rxq->cp_ring;
B_CP_DB_DISARM(cpr);
}
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
struct bnxt_vnic_info *vnic = NULL;
int rc = 0;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (rxq == NULL) {
PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
return -EINVAL;
}
+ /* Set the queue state to started here.
+ * We check the status of the queue while posting buffers;
+ * if the queue is not started, we do not post buffers for Rx.
+ */
+ rxq->rx_started = true;
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
- bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
+ rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
+ if (rc)
+ return rc;
+
+ if (BNXT_CHIP_THOR(bp)) {
+ /* Reconfigure default receive ring and MRU. */
+ bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
+ }
PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
vnic = rxq->vnic;
- if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
- return 0;
-
- PMD_DRV_LOG(DEBUG,
- "vnic = %p fw_grp_id = %d\n",
- vnic, bp->grp_info[rx_queue_id].fw_grp_id);
+ if (BNXT_HAS_RING_GRPS(bp)) {
+ if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
+ return 0;
- vnic->fw_grp_ids[rx_queue_id] =
+ vnic->fw_grp_ids[rx_queue_id] =
bp->grp_info[rx_queue_id].fw_grp_id;
+ PMD_DRV_LOG(DEBUG,
+ "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id].fw_grp_id);
+ }
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
rc = bnxt_vnic_rss_configure(bp, vnic);
}
- if (rc == 0)
- rxq->rx_deferred_start = false;
+ if (rc != 0) {
+ dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_started = false;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "queue %d, rx_deferred_start %d, state %d!\n",
+ rx_queue_id, rxq->rx_deferred_start,
+ bp->eth_dev->data->rx_queue_state[rx_queue_id]);
return rc;
}
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_vnic_info *vnic = NULL;
struct bnxt_rx_queue *rxq = NULL;
- int rc = 0;
+ int active_queue_cnt = 0;
+ int i, rc = 0;
- /* Rx CQ 0 also works as Default CQ for async notifications */
- if (!rx_queue_id) {
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
+ /* For the stingray platform and other platforms needing tighter
+ * control of resource utilization, Rx CQ 0 also works as
+ * Default CQ for async notifications
+ */
+ if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
return -EINVAL;
}
rxq = bp->rx_queues[rx_queue_id];
-
- if (rxq == NULL) {
+ if (!rxq) {
PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
return -EINVAL;
}
+ vnic = rxq->vnic;
+ if (!vnic) {
+ PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
+ rx_queue_id);
+ return -EINVAL;
+ }
+
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- rxq->rx_deferred_start = true;
+ rxq->rx_started = false;
PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
- vnic = rxq->vnic;
- vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
+ if (BNXT_HAS_RING_GRPS(bp))
+ vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
rc = bnxt_vnic_rss_configure(bp, vnic);
}
+ if (BNXT_CHIP_THOR(bp)) {
+ /* Compute current number of active receive queues. */
+ for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
+ if (bp->rx_queues[i]->rx_started)
+ active_queue_cnt++;
+
+ /*
+ * For Thor, we need to ensure that the VNIC default receive
+ * ring corresponds to an active receive queue. When no queue
+ * is active, we need to temporarily set the MRU to zero so
+ * that packets are dropped early in the receive pipeline in
+ * order to prevent the VNIC default receive ring from being
+ * accessed.
+ */
+ if (active_queue_cnt == 0) {
+ uint16_t saved_mru = vnic->mru;
+
+ vnic->mru = 0;
+ /* Reconfigure default receive ring and MRU. */
+ bnxt_hwrm_vnic_cfg(bp, vnic);
+ vnic->mru = saved_mru;
+ } else {
+ /* Reconfigure default receive ring. */
+ bnxt_hwrm_vnic_cfg(bp, vnic);
+ }
+ }
+
if (rc == 0)
bnxt_rx_queue_release_mbufs(rxq);