/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Queues
 */
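/*
 * Report the Rx offload capabilities supported by the port. PTP
 * timestamping and VLAN stripping are advertised only when the
 * corresponding device capability flags are set.
 */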
uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp)
{
	uint64_t rx_offload_capa;

	rx_offload_capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
			  RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
			  RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
			  RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
			  RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
			  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
			  RTE_ETH_RX_OFFLOAD_TCP_LRO     |
			  RTE_ETH_RX_OFFLOAD_SCATTER     |
			  RTE_ETH_RX_OFFLOAD_RSS_HASH;

	rx_offload_capa |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
			   RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM;

	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

	return rx_offload_capa;
}
/* Determine whether the current configuration needs aggregation ring in HW. */
int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev)
{
	/* scattered_rx will be true if OFFLOAD_SCATTER is enabled,
	 * if LRO is enabled, or if the max packet len is greater than the
	 * mbuf data size. So an AGG ring will be needed whenever scattered_rx
	 * is set.
	 */
	return eth_dev->data->scattered_rx ? 1 : 0;
}
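/*
 * Drop the queue's reference to its HW stats area. The backing memory
 * appears to be part of the queue memzone released in bnxt_free_rxq_mem().
 */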
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
	if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
		rxq->cp_ring->hw_stats = NULL;
}
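/*
 * Distribute the configured Rx queues across the required number of
 * VNICs (pools) for the selected multi-queue mode, attach an L2 filter
 * to the default VNIC, and program each VNIC's RSS parameters.
 */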
int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
	int start_grp_id, end_grp_id = 1, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	enum rte_eth_nb_pools pools = 1, max_pools = 0;
	struct bnxt_rx_queue *rxq;

	bp->nr_vnics = 0;
	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

		switch (dev_conf->rxmode.mq_mode) {
		case RTE_ETH_MQ_RX_VMDQ_RSS:
		case RTE_ETH_MQ_RX_VMDQ_ONLY:
		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
			/* FALLTHROUGH */
			/* ETH_8/64_POOLs */
			pools = conf->nb_queue_pools;
			/* For each pool, allocate MACVLAN CFA rule & VNIC */
			max_pools = RTE_MIN(bp->max_vnics,
					    RTE_MIN(bp->max_l2_ctx,
					    RTE_MIN(bp->max_rsscos_ctx,
						    RTE_ETH_64_POOLS)));
			PMD_DRV_LOG(DEBUG,
				    "pools = %u max_pools = %u\n",
				    pools, max_pools);
			if (pools > max_pools)
				pools = max_pools;
			break;
		case RTE_ETH_MQ_RX_RSS:
			pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unsupported mq_mode %d\n",
				    dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
	} else if (!dev_conf->rxmode.mq_mode) {
		pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : pools;
	}

	pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
	nb_q_per_grp = bp->rx_cp_nr_rings / pools;
	PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
		    pools, nb_q_per_grp);
	start_grp_id = 0;
	end_grp_id = nb_q_per_grp;
	for (i = 0; i < pools; i++) {
		vnic = &bp->vnic_info[i];
		if (!vnic) {
			PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
			rxq = bp->eth_dev->data->rx_queues[ring_idx];
			rxq->vnic = vnic;
			PMD_DRV_LOG(DEBUG,
				    "rxq[%d] = %p vnic[%d] = %p\n",
				    ring_idx, rxq, i, vnic);
		}
		if (i == 0) {
			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
				bp->eth_dev->data->promiscuous = 1;
				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
			}
			vnic->func_default = true;
		}
		vnic->start_grp_id = start_grp_id;
		vnic->end_grp_id = end_grp_id;

		if (i) {
			if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
			    !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
				vnic->rss_dflt_cr = true;
			goto skip_filter_allocation;
		}
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		filter->mac_index = 0;
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		/*
		 * TODO: Configure & associate CFA rule for
		 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
		 */
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
		start_grp_id = end_grp_id;
		end_grp_id += nb_q_per_grp;
	}
	bp->rx_num_qs_per_vnic = nb_q_per_grp;
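	/*
	 * When RSS is requested, mirror the port-level RSS hash
	 * configuration (hash types, hash level, and optional key)
	 * into each VNIC so that every pool is programmed identically.
	 */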
	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
		struct rte_eth_rss_conf *rss = &bp->rss_conf;

		for (i = 0; i < bp->nr_vnics; i++) {
			uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);

			vnic = &bp->vnic_info[i];
			vnic->hash_type =
				bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
			vnic->hash_mode =
				bnxt_rte_to_hwrm_hash_level(bp,
							    rss->rss_hf,
							    lvl);

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss->rss_key &&
			    rss->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key,
				       rss->rss_key, rss->rss_key_len);
		}
	}

	return rc;

err_out:
	/* Free allocated vnic/filters */

	return rc;
}
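/*
 * Free all mbufs currently held by a queue: the Rx buffer ring, the
 * aggregation ring (when one is in use), and any in-progress TPA (LRO)
 * aggregations.
 */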
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
	struct rte_mbuf **sw_ring;
	struct bnxt_tpa_info *tpa_info;
	uint16_t i;

	if (!rxq || !rxq->rx_ring)
		return;

	sw_ring = rxq->rx_ring->rx_buf_ring;
	if (sw_ring) {
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
		/*
		 * The vector receive burst function does not set used
		 * mbuf pointers to NULL, do that here to simplify
		 * cleanup logic.
		 */
		for (i = 0; i < rxq->rxrearm_nb; i++)
			sw_ring[rxq->rxrearm_start + i] = NULL;
		rxq->rxrearm_nb = 0;
#endif
		for (i = 0;
		     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				if (sw_ring[i] != &rxq->fake_mbuf)
					rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}

	/* Free up mbufs in Agg ring */
	if (rxq->bp == NULL ||
	    rxq->bp->eth_dev == NULL ||
	    !bnxt_need_agg_ring(rxq->bp->eth_dev))
		return;

	sw_ring = rxq->rx_ring->ag_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}

	/* Free up mbufs in TPA */
	tpa_info = rxq->rx_ring->tpa_info;
	if (tpa_info) {
		int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

		for (i = 0; i < max_aggs; i++) {
			if (tpa_info[i].mbuf) {
				rte_pktmbuf_free_seg(tpa_info[i].mbuf);
				tpa_info[i].mbuf = NULL;
			}
		}
	}
}
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}
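/*
 * Tear down all software and hardware resources of a queue: mbufs
 * first, then the Rx/AGG/completion ring descriptor memory, the HW
 * stats reference, and the backing memzone.
 */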
void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq)
{
	bnxt_rx_queue_release_mbufs(rxq);

	/* Free RX, AGG ring hardware descriptors */
	if (rxq->rx_ring) {
		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		rte_free(rxq->rx_ring->rx_ring_struct);
		rxq->rx_ring->rx_ring_struct = NULL;
		/* Free RX Agg ring hardware descriptors */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
		rte_free(rxq->rx_ring->ag_ring_struct);
		rxq->rx_ring->ag_ring_struct = NULL;

		rte_free(rxq->rx_ring);
		rxq->rx_ring = NULL;
	}
	/* Free RX completion ring hardware descriptors */
	if (rxq->cp_ring) {
		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring->cp_ring_struct);
		rxq->cp_ring->cp_ring_struct = NULL;
		rte_free(rxq->cp_ring);
		rxq->cp_ring = NULL;
	}

	bnxt_free_rxq_stats(rxq);
	rte_memzone_free(rxq->mz);
	rxq->mz = NULL;
}
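/* ethdev .rx_queue_release callback: free the HW rings, the queue
 * memory, and the queue structure itself.
 */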
void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];

	if (rxq != NULL) {
		if (is_bnxt_in_error(rxq->bp))
			return;

		bnxt_free_hwrm_rx_ring(rxq->bp, rxq->queue_id);
		bnxt_free_rxq_mem(rxq);
		rte_free(rxq);
		dev->data->rx_queues[queue_idx] = NULL;
	}
}
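/*
 * ethdev .rx_queue_setup callback: validate ring parameters, allocate
 * the queue and its ring structures, and reserve the HW descriptor
 * memory. Queue 0 cannot be deferred when it doubles as the async
 * completion ring.
 */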
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
			       uint16_t queue_idx,
			       uint16_t nb_desc,
			       unsigned int socket_id,
			       const struct rte_eth_rxconf *rx_conf,
			       struct rte_mempool *mp)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (queue_idx >= bnxt_max_rings(bp)) {
		PMD_DRV_LOG(ERR,
			    "Cannot create Rx ring %d. Only %d rings available\n",
			    queue_idx, bp->max_rx_rings);
		return -EINVAL;
	}

	if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_RX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
		return -EINVAL;
	}

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(eth_dev, queue_idx);
	}
	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
		return -ENOMEM;
	}
	rxq->bp = bp;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh =
		RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);

	if (rx_conf->rx_drop_en != BNXT_DEFAULT_RX_DROP_EN)
		PMD_DRV_LOG(NOTICE,
			    "Per-queue config of drop-en is not supported.\n");
	rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;
376 PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
378 eth_dev->data->rx_queues[queue_idx] = rxq;
380 rc = bnxt_init_rx_ring_struct(rxq, socket_id);
383 "init_rx_ring_struct failed!\n");
387 PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
388 rxq->queue_id = queue_idx;
389 rxq->port_id = eth_dev->data->port_id;
390 if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
391 rxq->crc_len = RTE_ETHER_CRC_LEN;
395 /* Allocate RX ring hardware descriptors */
396 rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
400 "ring_dma_zone_reserve for rx_ring failed!\n");
403 rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
405 /* rxq 0 must not be stopped when used as async CPR */
406 if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
407 rxq->rx_deferred_start = false;
409 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
411 rxq->rx_started = rxq->rx_deferred_start ? false : true;
412 rxq->vnic = BNXT_GET_DEFAULT_VNIC(bp);
414 /* Configure mtu if it is different from what was configured before */
416 bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
420 bnxt_rx_queue_release_op(eth_dev, queue_idx);
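/* ethdev Rx interrupt enable callback: re-arm the completion ring doorbell. */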
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;

		cpr = rxq->cp_ring;
		B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
	}
	return rc;
}
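/* ethdev Rx interrupt disable callback: disarm the completion ring doorbell. */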
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;

		cpr = rxq->cp_ring;
		B_CP_DB_DISARM(cpr);
	}
	return rc;
}
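/*
 * ethdev .rx_queue_start callback: rebuild the HW ring for this queue,
 * mark it started, and (when RSS is enabled) add it back to the VNIC
 * RSS table.
 */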
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
	struct bnxt_vnic_info *vnic = NULL;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	/* Set the queue state to started here.
	 * We check the status of the queue while posting buffers;
	 * if the queue is not started, we do not post buffers for Rx.
	 */
	rxq->rx_started = true;
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
	rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
	if (rc)
		return rc;

	if (BNXT_CHIP_P5(bp)) {
		/* Reconfigure default receive ring and MRU. */
		bnxt_hwrm_vnic_cfg(bp, rxq->vnic);
	}
	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);

	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
		vnic = rxq->vnic;

		if (BNXT_HAS_RING_GRPS(bp)) {
			if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
				return 0;

			vnic->fw_grp_ids[rx_queue_id] =
					bp->grp_info[rx_queue_id].fw_grp_id;
			PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
				    vnic, bp->grp_info[rx_queue_id].fw_grp_id);
		}

		PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
		rc = bnxt_vnic_rss_configure(bp, vnic);
	}

	if (rc != 0) {
		dev->data->rx_queue_state[rx_queue_id] =
				RTE_ETH_QUEUE_STATE_STOPPED;
		rxq->rx_started = false;
	}

	PMD_DRV_LOG(INFO,
		    "queue %d, rx_deferred_start %d, state %d!\n",
		    rx_queue_id, rxq->rx_deferred_start,
		    bp->eth_dev->data->rx_queue_state[rx_queue_id]);

	return rc;
}
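/*
 * ethdev .rx_queue_stop callback: remove the queue from the VNIC RSS
 * table and keep the VNIC default ring pointing at an active queue; on
 * P5 (Thor) a zero MRU is used to drop traffic when no queue in the
 * VNIC remains active.
 */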
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_rx_queue *rxq = NULL;
	int active_queue_cnt = 0;
	int i, rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* For the stingray platform and other platforms needing tighter
	 * control of resource utilization, Rx CQ 0 also works as
	 * Default CQ for async notifications
	 */
	if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
		PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
		return -EINVAL;
	}

	rxq = bp->rx_queues[rx_queue_id];
	if (!rxq) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	vnic = rxq->vnic;
	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
			    rx_queue_id);
		return -EINVAL;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	rxq->rx_started = false;
	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");

	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
		if (BNXT_HAS_RING_GRPS(bp))
			vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;

		PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
		rc = bnxt_vnic_rss_configure(bp, vnic);
	}

	/* Compute current number of active receive queues. */
	for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++)
		if (bp->rx_queues[i]->rx_started)
			active_queue_cnt++;

	if (BNXT_CHIP_P5(bp)) {
		/*
		 * For Thor, we need to ensure that the VNIC default receive
		 * ring corresponds to an active receive queue. When no queue
		 * is active, we need to temporarily set the MRU to zero so
		 * that packets are dropped early in the receive pipeline in
		 * order to prevent the VNIC default receive ring from being
		 * accessed.
		 */
		if (active_queue_cnt == 0) {
			uint16_t saved_mru = vnic->mru;

			/* clear RSS setting on vnic. */
			bnxt_vnic_rss_clear_p5(bp, vnic);

			vnic->mru = 0;
			/* Reconfigure default receive ring and MRU. */
			bnxt_hwrm_vnic_cfg(bp, vnic);
			vnic->mru = saved_mru;
		} else {
			/* Reconfigure default receive ring. */
			bnxt_hwrm_vnic_cfg(bp, vnic);
		}
	} else if (active_queue_cnt) {
		/*
		 * If the queue being stopped is the current default queue and
		 * there are other active queues, pick one of them as the
		 * default and reconfigure the vnic.
		 */
		if (vnic->dflt_ring_grp == bp->grp_info[rx_queue_id].fw_grp_id) {
			for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
				if (bp->rx_queues[i]->rx_started) {
					vnic->dflt_ring_grp =
						bp->grp_info[i].fw_grp_id;
					bnxt_hwrm_vnic_cfg(bp, vnic);
					break;
				}
			}
		}
	}

	if (rc == 0)
		bnxt_rx_queue_release_mbufs(rxq);

	return rc;
}