/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
* All rights reserved.
*/
* RX Queues
*/
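+/* Report the RX offload capabilities supported by the port. */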
+uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp)
+{
+ uint64_t rx_offload_capa;
+
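+	/* Offloads that are always supported, regardless of device capabilities. */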
+ rx_offload_capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
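+	/* Tunnel (outer) checksum offloads are likewise always reported. */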
+ rx_offload_capa |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM;
+
+ if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
+ rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
+ rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+
+ return rx_offload_capa;
+}
+
+/* Determine whether the current configuration needs the aggregation ring in HW. */
+int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev)
+{
+	/* scattered_rx will be true if OFFLOAD_SCATTER is enabled,
+	 * if LRO is enabled, or if the max packet length is greater
+	 * than the mbuf data size. The AGG ring is therefore needed
+	 * whenever scattered_rx is set.
+	 */
+ return eth_dev->data->scattered_rx ? 1 : 0;
+}
+
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
int bnxt_mq_rx_configure(struct bnxt *bp)
{
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct rte_eth_rss_conf *rss = &bp->rss_conf;
const struct rte_eth_vmdq_rx_conf *conf =
&dev_conf->rx_adv_conf.vmdq_rx_conf;
unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
bp->nr_vnics = 0;
- /* Single queue mode */
- if (bp->rx_cp_nr_rings < 2) {
- vnic = &bp->vnic_info[0];
- if (!vnic) {
- PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
- rc = -ENOMEM;
- goto err_out;
- }
- vnic->flags |= BNXT_VNIC_INFO_BCAST;
- bp->nr_vnics++;
-
- rxq = bp->eth_dev->data->rx_queues[0];
- rxq->vnic = vnic;
-
- vnic->func_default = true;
- vnic->start_grp_id = 0;
- vnic->end_grp_id = vnic->start_grp_id;
- filter = bnxt_alloc_filter(bp);
- if (!filter) {
- PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
- rc = -ENOMEM;
- goto err_out;
- }
- filter->mac_index = 0;
- filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
- STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
- goto out;
- }
-
/* Multi-queue mode */
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
switch (dev_conf->rxmode.mq_mode) {
- case ETH_MQ_RX_VMDQ_RSS:
- case ETH_MQ_RX_VMDQ_ONLY:
- case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_RSS:
+ case RTE_ETH_MQ_RX_VMDQ_ONLY:
+ case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
/* FALLTHROUGH */
-			/* ETH_8/64_POOLs */
+			/* RTE_ETH_8/64_POOLs */
pools = conf->nb_queue_pools;
max_pools = RTE_MIN(bp->max_vnics,
RTE_MIN(bp->max_l2_ctx,
RTE_MIN(bp->max_rsscos_ctx,
- ETH_64_POOLS)));
+ RTE_ETH_64_POOLS)));
PMD_DRV_LOG(DEBUG,
"pools = %u max_pools = %u\n",
pools, max_pools);
if (pools > max_pools)
pools = max_pools;
break;
- case ETH_MQ_RX_RSS:
+ case RTE_ETH_MQ_RX_RSS:
pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
break;
default:
pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
nb_q_per_grp = bp->rx_cp_nr_rings / pools;
- bp->rx_num_qs_per_vnic = nb_q_per_grp;
PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
pools, nb_q_per_grp);
start_grp_id = 0;
ring_idx, rxq, i, vnic);
}
if (i == 0) {
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
bp->eth_dev->data->promiscuous = 1;
vnic->flags |= BNXT_VNIC_INFO_PROMISC;
}
vnic->end_grp_id = end_grp_id;
if (i) {
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
- !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
+ !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
vnic->rss_dflt_cr = true;
goto skip_filter_allocation;
}
end_grp_id += nb_q_per_grp;
}
-out:
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
- struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
-
- if (bp->flags & BNXT_FLAG_UPDATE_HASH)
- bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
-
- for (i = 0; i < bp->nr_vnics; i++) {
- vnic = &bp->vnic_info[i];
- vnic->hash_type =
- bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
-
- /*
- * Use the supplied key if the key length is
- * acceptable and the rss_key is not NULL
- */
- if (rss->rss_key &&
- rss->rss_key_len <= HW_HASH_KEY_SIZE)
- memcpy(vnic->rss_hash_key,
- rss->rss_key, rss->rss_key_len);
- }
+ bp->rx_num_qs_per_vnic = nb_q_per_grp;
+
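+	/* Program the RSS hash type, hash level and hash key on every VNIC. */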
+ for (i = 0; i < bp->nr_vnics; i++) {
+ uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
+
+ vnic = &bp->vnic_info[i];
+ vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
+ vnic->hash_mode = bnxt_rte_to_hwrm_hash_level(bp, rss->rss_hf, lvl);
+
+ /*
+ * Use the supplied key if the key length is
+ * acceptable and the rss_key is not NULL
+ */
+ if (rss->rss_key && rss->rss_key_len <= HW_HASH_KEY_SIZE)
+ memcpy(vnic->rss_hash_key, rss->rss_key, rss->rss_key_len);
}
return rc;
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
- struct bnxt_sw_rx_bd *sw_ring;
+ struct rte_mbuf **sw_ring;
struct bnxt_tpa_info *tpa_info;
uint16_t i;
- if (!rxq)
+ if (!rxq || !rxq->rx_ring)
return;
- rte_spinlock_lock(&rxq->lock);
-
sw_ring = rxq->rx_ring->rx_buf_ring;
if (sw_ring) {
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
+ /*
+ * The vector receive burst function does not set used
+		 * mbuf pointers to NULL; do that here to simplify the
+		 * cleanup logic.
+ */
+ for (i = 0; i < rxq->rxrearm_nb; i++)
+ sw_ring[rxq->rxrearm_start + i] = NULL;
+ rxq->rxrearm_nb = 0;
+#endif
for (i = 0;
i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
- if (sw_ring[i].mbuf) {
- rte_pktmbuf_free_seg(sw_ring[i].mbuf);
- sw_ring[i].mbuf = NULL;
+ if (sw_ring[i]) {
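+				/* Don't free the fake mbuf placeholder used by the vector RX path. */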
+ if (sw_ring[i] != &rxq->fake_mbuf)
+ rte_pktmbuf_free_seg(sw_ring[i]);
+ sw_ring[i] = NULL;
}
}
}
/* Free up mbufs in Agg ring */
+ if (rxq->bp == NULL ||
+ rxq->bp->eth_dev == NULL ||
+ !bnxt_need_agg_ring(rxq->bp->eth_dev))
+ return;
+
sw_ring = rxq->rx_ring->ag_buf_ring;
if (sw_ring) {
for (i = 0;
i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
- if (sw_ring[i].mbuf) {
- rte_pktmbuf_free_seg(sw_ring[i].mbuf);
- sw_ring[i].mbuf = NULL;
+ if (sw_ring[i]) {
+ rte_pktmbuf_free_seg(sw_ring[i]);
+ sw_ring[i] = NULL;
}
}
}
}
}
- rte_spinlock_unlock(&rxq->lock);
}
void bnxt_free_rx_mbufs(struct bnxt *bp)
}
}
-void bnxt_rx_queue_release_op(void *rx_queue)
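+/* Free all memory associated with an RX queue, except the queue struct itself. */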
+void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq)
{
- struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
-
- if (rxq) {
- if (is_bnxt_in_error(rxq->bp))
- return;
-		bnxt_rx_queue_release_mbufs(rxq);
-
+	bnxt_rx_queue_release_mbufs(rxq);
- /* Free RX ring hardware descriptors */
+ /* Free RX, AGG ring hardware descriptors */
+ if (rxq->rx_ring) {
bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+ rte_free(rxq->rx_ring->rx_ring_struct);
+ rxq->rx_ring->rx_ring_struct = NULL;
/* Free RX Agg ring hardware descriptors */
bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
+ rte_free(rxq->rx_ring->ag_ring_struct);
+ rxq->rx_ring->ag_ring_struct = NULL;
- /* Free RX completion ring hardware descriptors */
+ rte_free(rxq->rx_ring);
+ rxq->rx_ring = NULL;
+ }
+ /* Free RX completion ring hardware descriptors */
+ if (rxq->cp_ring) {
bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+ rte_free(rxq->cp_ring->cp_ring_struct);
+ rxq->cp_ring->cp_ring_struct = NULL;
+ rte_free(rxq->cp_ring);
+ rxq->cp_ring = NULL;
+ }
+
+ bnxt_free_rxq_stats(rxq);
+ rte_memzone_free(rxq->mz);
+ rxq->mz = NULL;
+}
+
+void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
-	bnxt_free_rxq_stats(rxq);
-	rte_memzone_free(rxq->mz);
-	rxq->mz = NULL;
+	struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];
+
+	if (rxq != NULL) {
+ if (is_bnxt_in_error(rxq->bp))
+ return;
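+		/* Free the HWRM (firmware) ring resources first, then the driver's queue memory. */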
+ bnxt_free_hwrm_rx_ring(rxq->bp, rxq->queue_id);
+ bnxt_free_rxq_mem(rxq);
rte_free(rxq);
}
}
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
struct bnxt_rx_queue *rxq;
int rc = 0;
- uint8_t queue_state;
rc = is_bnxt_in_error(bp);
if (rc)
return rc;
- if (queue_idx >= BNXT_MAX_RINGS(bp)) {
+ if (queue_idx >= bnxt_max_rings(bp)) {
PMD_DRV_LOG(ERR,
"Cannot create Rx ring %d. Only %d rings available\n",
queue_idx, bp->max_rx_rings);
return -EINVAL;
}
- if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
+ if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_RX_DESC_CNT) {
PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (eth_dev->data->rx_queues) {
rxq = eth_dev->data->rx_queues[queue_idx];
if (rxq)
- bnxt_rx_queue_release_op(rxq);
+ bnxt_rx_queue_release_op(eth_dev, queue_idx);
}
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
- rc = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
rxq->bp = bp;
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
- rxq->rx_free_thresh = rx_conf->rx_free_thresh;
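+	/* Ignore the requested threshold: fix it at a quarter of the
+	 * (power-of-2 aligned) ring size, capped at the maximum burst size.
+	 */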
+ rxq->rx_free_thresh =
+ RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);
+
+ if (rx_conf->rx_drop_en != BNXT_DEFAULT_RX_DROP_EN)
+ PMD_DRV_LOG(NOTICE,
+ "Per-queue config of drop-en is not supported.\n");
+ rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;
PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
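+	/* Publish the queue early so the error path can release it
+	 * through the device data.
+	 */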
+ eth_dev->data->rx_queues[queue_idx] = rxq;
+
rc = bnxt_init_rx_ring_struct(rxq, socket_id);
- if (rc)
- goto out;
+ if (rc) {
+		PMD_DRV_LOG(ERR, "init_rx_ring_struct failed!\n");
+ goto err;
+ }
PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
- if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
- eth_dev->data->rx_queues[queue_idx] = rxq;
/* Allocate RX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL,
- "rxr")) {
+ rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
+ NULL, "rxr");
+ if (rc) {
PMD_DRV_LOG(ERR,
- "ring_dma_zone_reserve for rx_ring failed!\n");
- bnxt_rx_queue_release_op(rxq);
- rc = -ENOMEM;
- goto out;
+ "ring_dma_zone_reserve for rx_ring failed!\n");
+ goto err;
}
rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
else
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
- if (rxq->rx_deferred_start) {
- queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
- rxq->rx_started = false;
- } else {
- queue_state = RTE_ETH_QUEUE_STATE_STARTED;
- rxq->rx_started = true;
- }
- eth_dev->data->rx_queue_state[queue_idx] = queue_state;
- rte_spinlock_init(&rxq->lock);
+	rxq->rx_started = !rxq->rx_deferred_start;
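+	/* Queues start on the default VNIC; bnxt_mq_rx_configure()
+	 * remaps them when multi-queue modes are configured.
+	 */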
+ rxq->vnic = BNXT_GET_DEFAULT_VNIC(bp);
/* Configure mtu if it is different from what was configured before */
if (!queue_idx)
bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
-out:
+ return 0;
+err:
+ bnxt_rx_queue_release_op(eth_dev, queue_idx);
return rc;
}
if (rc)
return rc;
- if (BNXT_CHIP_THOR(bp)) {
+ if (BNXT_CHIP_P5(bp)) {
/* Reconfigure default receive ring and MRU. */
bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
}
PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
vnic = rxq->vnic;
if (BNXT_HAS_RING_GRPS(bp)) {
rxq->rx_started = false;
PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
if (BNXT_HAS_RING_GRPS(bp))
vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
rc = bnxt_vnic_rss_configure(bp, vnic);
}
- if (BNXT_CHIP_THOR(bp)) {
- /* Compute current number of active receive queues. */
- for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
- if (bp->rx_queues[i]->rx_started)
- active_queue_cnt++;
+ /* Compute current number of active receive queues. */
+ for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
+ if (bp->rx_queues[i]->rx_started)
+ active_queue_cnt++;
+ if (BNXT_CHIP_P5(bp)) {
/*
* For Thor, we need to ensure that the VNIC default receive
* ring corresponds to an active receive queue. When no queue
if (active_queue_cnt == 0) {
uint16_t saved_mru = vnic->mru;
+			/* Clear the RSS configuration on the VNIC. */
+ bnxt_vnic_rss_clear_p5(bp, vnic);
+
vnic->mru = 0;
/* Reconfigure default receive ring and MRU. */
bnxt_hwrm_vnic_cfg(bp, vnic);
/* Reconfigure default receive ring. */
bnxt_hwrm_vnic_cfg(bp, vnic);
}
+ } else if (active_queue_cnt) {
+ /*
+ * If the queue being stopped is the current default queue and
+ * there are other active queues, pick one of them as the
+ * default and reconfigure the vnic.
+ */
+ if (vnic->dflt_ring_grp == bp->grp_info[rx_queue_id].fw_grp_id) {
+ for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
+ if (bp->rx_queues[i]->rx_started) {
+ vnic->dflt_ring_grp =
+ bp->grp_info[i].fw_grp_id;
+ bnxt_hwrm_vnic_cfg(bp, vnic);
+ break;
+ }
+ }
+ }
}
if (rc == 0)