/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
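
/*
 * RX Queues
 */

/* Detach the queue from its HW stats area. Only the pointer is cleared
 * here; the backing stats context memory is owned and freed elsewhere.
 */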
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
	if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
		rxq->cp_ring->hw_stats = NULL;
}
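
/* Distribute the port's Rx rings across VNICs. A device with a single
 * Rx ring gets one default VNIC; in VMDq/RSS modes the rings are split
 * evenly across the configured pools, each pool backed by its own VNIC
 * and ring group range, with an L2 filter attached to the default VNIC.
 */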
int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
	int start_grp_id, end_grp_id = 1, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	enum rte_eth_nb_pools pools = bp->rx_cp_nr_rings, max_pools = 0;
	struct bnxt_rx_queue *rxq;

	bp->nr_vnics = 0;

	/* Single queue mode */
	if (bp->rx_cp_nr_rings < 2) {
		vnic = &bp->vnic_info[0];
		if (!vnic) {
			PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		rxq = bp->eth_dev->data->rx_queues[0];
		rxq->vnic = vnic;

		vnic->func_default = true;
		vnic->start_grp_id = 0;
		vnic->end_grp_id = vnic->start_grp_id;
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		goto out;
	}

	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_ONLY:
			/* FALLTHROUGH */
			/* ETH_8/64_POOLs */
			pools = conf->nb_queue_pools;
			/* For each pool, allocate MACVLAN CFA rule & VNIC */
			max_pools = RTE_MIN(bp->max_vnics,
					    RTE_MIN(bp->max_l2_ctx,
					    RTE_MIN(bp->max_rsscos_ctx,
						    ETH_64_POOLS)));
			PMD_DRV_LOG(DEBUG,
				    "pools = %u max_pools = %u\n",
				    pools, max_pools);
			if (pools > max_pools)
				pools = max_pools;
			break;
		case ETH_MQ_RX_RSS:
			pools = 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unsupported mq_mode %d\n",
				    dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
	}

	nb_q_per_grp = bp->rx_cp_nr_rings / pools;
	bp->rx_num_qs_per_vnic = nb_q_per_grp;
	PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
		    pools, nb_q_per_grp);
	start_grp_id = 0;
	end_grp_id = nb_q_per_grp;
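
	/*
	 * Example: with 8 Rx rings and pools = 4, nb_q_per_grp is 2 and the
	 * loop below hands pool i the ring group range [2 * i, 2 * i + 2).
	 */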
	for (i = 0; i < pools; i++) {
		vnic = &bp->vnic_info[i];
		if (!vnic) {
			PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		bp->nr_vnics++;

		for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
			rxq = bp->eth_dev->data->rx_queues[ring_idx];
			rxq->vnic = vnic;
			PMD_DRV_LOG(DEBUG,
				    "rxq[%d] = %p vnic[%d] = %p\n",
				    ring_idx, rxq, i, vnic);
		}
		if (i == 0) {
			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
				bp->eth_dev->data->promiscuous = 1;
				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
			}
			vnic->func_default = true;
		}
		vnic->start_grp_id = start_grp_id;
		vnic->end_grp_id = end_grp_id;

		if (i) {
			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
			    !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
				vnic->rss_dflt_cr = true;
			goto skip_filter_allocation;
		}
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		/*
		 * TODO: Configure & associate CFA rule for
		 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
		 */
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
		start_grp_id = end_grp_id;
		end_grp_id += nb_q_per_grp;
	}

out:
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;

		if (bp->flags & BNXT_FLAG_UPDATE_HASH) {
			rss = &bp->rss_conf;
			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
		}

		for (i = 0; i < bp->nr_vnics; i++) {
			vnic = &bp->vnic_info[i];
			vnic->hash_type =
				bnxt_rte_to_hwrm_hash_types(rss->rss_hf);

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss->rss_key &&
			    rss->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key,
				       rss->rss_key, rss->rss_key_len);
		}
	}

	return rc;

err_out:
	/* Free allocated vnic/filters */
	return rc;
}
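
/* Return every mbuf posted to the Rx ring, the aggregation ring, and the
 * TPA reassembly slots to its mempool. The queue's spinlock is held for
 * the walk so the release can synchronize with the stop/release paths.
 */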
void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
	struct bnxt_sw_rx_bd *sw_ring;
	struct bnxt_tpa_info *tpa_info;
	uint16_t i;

	if (!rxq)
		return;

	rte_spinlock_lock(&rxq->lock);

	sw_ring = rxq->rx_ring->rx_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
			if (sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(sw_ring[i].mbuf);
				sw_ring[i].mbuf = NULL;
			}
		}
	}
	/* Free up mbufs in Agg ring */
	sw_ring = rxq->rx_ring->ag_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
			if (sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(sw_ring[i].mbuf);
				sw_ring[i].mbuf = NULL;
			}
		}
	}

	/* Free up mbufs in TPA */
	tpa_info = rxq->rx_ring->tpa_info;
	if (tpa_info) {
		for (i = 0; i < BNXT_TPA_MAX; i++) {
			if (tpa_info[i].mbuf) {
				rte_pktmbuf_free_seg(tpa_info[i].mbuf);
				tpa_info[i].mbuf = NULL;
			}
		}
	}

	rte_spinlock_unlock(&rxq->lock);
}
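
/* Release the posted mbufs of every Rx queue on the port. */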
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}
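
/* Tear down an Rx queue: free the posted mbufs first, then the Rx,
 * aggregation, and completion ring descriptor memory, and finally the
 * stats reference, the memzone, and the queue structure itself.
 */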
void bnxt_rx_queue_release_op(void *rx_queue)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

	if (rxq) {
		if (is_bnxt_in_error(rxq->bp))
			return;

		bnxt_rx_queue_release_mbufs(rxq);

		/* Free RX ring hardware descriptors */
		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		/* Free RX Agg ring hardware descriptors */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);

		/* Free RX completion ring hardware descriptors */
		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);

		bnxt_free_rxq_stats(rxq);
		rte_memzone_free(rxq->mz);
		rxq->mz = NULL;

		rte_free(rxq);
	}
}
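
/* Device ops entry behind rte_eth_rx_queue_setup(). An illustrative
 * call (not from this driver) that lands here with queue_idx = 0 and
 * nb_desc = 512 would be:
 *
 *	rc = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				    NULL, mb_pool);
 *
 * ethdev substitutes the default rxconf when the caller passes NULL,
 * so rx_conf is always valid by the time this op runs.
 */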
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
			       uint16_t queue_idx,
			       uint16_t nb_desc,
			       unsigned int socket_id,
			       const struct rte_eth_rxconf *rx_conf,
			       struct rte_mempool *mp)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	int rc = 0;
	uint8_t queue_state;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (queue_idx >= bp->max_rx_rings) {
		PMD_DRV_LOG(ERR,
			"Cannot create Rx ring %d. Only %d rings available\n",
			queue_idx, bp->max_rx_rings);
		return -EINVAL;
	}

	if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
		rc = -EINVAL;
		goto out;
	}

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(rxq);
	}
	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
		rc = -ENOMEM;
		goto out;
	}
	rxq->bp = bp;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;

	PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc)
		goto out;

	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	eth_dev->data->rx_queues[queue_idx] = rxq;
	/* Allocate RX ring hardware descriptors */
	if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
			     rxq->nq_ring, "rxr")) {
		PMD_DRV_LOG(ERR,
			    "ring_dma_zone_reserve for rx_ring failed!\n");
		bnxt_rx_queue_release_op(rxq);
		rc = -ENOMEM;
		goto out;
	}
	rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);

	/* rxq 0 must not be stopped when used as async CPR */
	if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
		rxq->rx_deferred_start = false;
	else
		rxq->rx_deferred_start = rx_conf->rx_deferred_start;

	if (rxq->rx_deferred_start) {
		queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
		rxq->rx_started = false;
	} else {
		queue_state = RTE_ETH_QUEUE_STATE_STARTED;
		rxq->rx_started = true;
	}
	eth_dev->data->rx_queue_state[queue_idx] = queue_state;
	rte_spinlock_init(&rxq->lock);

out:
	return rc;
}
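
/* Rx interrupt enable: re-arm the completion ring doorbell at the
 * current raw consumer index so the next completion raises an interrupt.
 */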
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;
		cpr = rxq->cp_ring;
		B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
	}
	return rc;
}
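
/* Rx interrupt disable: disarm the completion ring doorbell. */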
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return -EINVAL;
		cpr = rxq->cp_ring;
		B_CP_DB_DISARM(cpr);
	}
	return rc;
}
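
/* Start a stopped/deferred Rx queue: mark it started, rebuild the HWRM
 * Rx ring, re-attach the queue's ring group to its VNIC, and reprogram
 * the RSS table when more than one queue feeds the VNIC.
 */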
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
	struct bnxt_vnic_info *vnic = NULL;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	/* Set the queue state to started here.
	 * We check the status of the queue while posting buffers.
	 * If the queue is not started, we do not post buffers for Rx.
	 */
	rxq->rx_started = true;
	bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
	rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		vnic = rxq->vnic;

		if (BNXT_HAS_RING_GRPS(bp)) {
			if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
				return 0;

			vnic->fw_grp_ids[rx_queue_id] =
					bp->grp_info[rx_queue_id].fw_grp_id;
			PMD_DRV_LOG(DEBUG,
				    "vnic = %p fw_grp_id = %d\n",
				    vnic, bp->grp_info[rx_queue_id].fw_grp_id);
		}

		PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
		if (vnic->rx_queue_cnt > 1)
			rc = bnxt_vnic_rss_configure(bp, vnic);
	}

	if (rc == 0)
		dev->data->rx_queue_state[rx_queue_id] =
				RTE_ETH_QUEUE_STATE_STARTED;
	else
		rxq->rx_started = false;

	PMD_DRV_LOG(INFO,
		    "queue %d, rx_deferred_start %d, state %d!\n",
		    rx_queue_id, rxq->rx_deferred_start,
		    bp->eth_dev->data->rx_queue_state[rx_queue_id]);

	return rc;
}
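
/* Stop an Rx queue: queue 0 cannot be stopped while it doubles as the
 * async completion ring; otherwise the queue is pulled out of the
 * VNIC's RSS table and its posted mbufs are released on success.
 */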
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_rx_queue *rxq = NULL;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* For the stingray platform and other platforms needing tighter
	 * control of resource utilization, Rx CQ 0 also works as
	 * Default CQ for async notifications
	 */
	if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
		PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
		return -EINVAL;
	}

	rxq = bp->rx_queues[rx_queue_id];
	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	rxq->rx_started = false;
	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		vnic = rxq->vnic;
		if (BNXT_HAS_RING_GRPS(bp))
			vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;

		PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
		if (vnic->rx_queue_cnt > 1)
			rc = bnxt_vnic_rss_configure(bp, vnic);
	}

	if (rc == 0)
		bnxt_rx_queue_release_mbufs(rxq);

	return rc;
}