/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
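
/*
 * RX Queues
 */

/*
 * Only clear the queue's HW stats pointer here; the backing stats memory
 * is part of the ring memzone and is released when the rings are freed.
 */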
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
	if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
		rxq->cp_ring->hw_stats = NULL;
}
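
/*
 * Distribute the device's Rx rings across VNICs. In single-queue mode one
 * default VNIC owns the only ring group; in VMDq modes one VNIC is created
 * per pool and the rings are split evenly between the pools. An L2 filter
 * is attached to each VNIC that needs one, and the RSS hash configuration
 * is programmed into every VNIC at the end if RSS is enabled.
 */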
int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
	int start_grp_id, end_grp_id = 1, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	enum rte_eth_nb_pools pools = bp->rx_cp_nr_rings, max_pools = 0;
	struct bnxt_rx_queue *rxq;

	bp->nr_vnics = 0;
	/* Single queue mode */
	if (bp->rx_cp_nr_rings < 2) {
		vnic = bnxt_alloc_vnic(bp);
		if (!vnic) {
			PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
		bp->nr_vnics++;

		rxq = bp->eth_dev->data->rx_queues[0];
		rxq->vnic = vnic;

		vnic->func_default = true;
		vnic->ff_pool_idx = 0;
		vnic->start_grp_id = 0;
		vnic->end_grp_id = vnic->start_grp_id;
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		goto out;
	}
	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_ONLY:
			pools = conf->nb_queue_pools;
			/* For each pool, allocate MACVLAN CFA rule & VNIC */
			max_pools = RTE_MIN(bp->max_vnics,
					    RTE_MIN(bp->max_l2_ctx,
					    RTE_MIN(bp->max_rsscos_ctx,
						    ETH_64_POOLS)));
			if (pools > max_pools)
				pools = max_pools;
			break;
		case ETH_MQ_RX_RSS:
			pools = 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unsupported mq_mode %d\n",
				dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
	}

	nb_q_per_grp = bp->rx_cp_nr_rings / pools;
	start_grp_id = 0;
	end_grp_id = nb_q_per_grp;
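
	/*
	 * Partition the Rx rings evenly across the pools: pool i owns the
	 * ring groups in [start_grp_id, end_grp_id).
	 */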
	for (i = 0; i < pools; i++) {
		vnic = bnxt_alloc_vnic(bp);
		if (!vnic) {
			PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
		bp->nr_vnics++;

		for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
			rxq = bp->eth_dev->data->rx_queues[ring_idx];
			rxq->vnic = vnic;
		}
		if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
			bp->eth_dev->data->promiscuous = 1;
			vnic->flags |= BNXT_VNIC_INFO_PROMISC;
		}
		vnic->func_default = true;

		vnic->ff_pool_idx = i;
		vnic->start_grp_id = start_grp_id;
		vnic->end_grp_id = end_grp_id;

		if (i) {
			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
			    !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
				vnic->rss_dflt_cr = true;
			goto skip_filter_allocation;
		}
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		/*
		 * TODO: Configure & associate CFA rule for
		 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
		 */
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
		start_grp_id = end_grp_id;
		end_grp_id += nb_q_per_grp;
	}
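
	/*
	 * With RSS enabled, translate the rte_eth hash flags into HWRM hash
	 * types and program them, plus the optional user-supplied hash key,
	 * into every VNIC allocated above.
	 */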
out:
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
		uint16_t hash_type = 0;

		if (bp->flags & BNXT_FLAG_UPDATE_HASH) {
			rss = &bp->rss_conf;
			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
		}

		if (rss->rss_hf & ETH_RSS_IPV4)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		if (rss->rss_hf & ETH_RSS_IPV6)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

		for (i = 0; i < bp->nr_vnics; i++) {
			STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
				vnic->hash_type = hash_type;

				/*
				 * Use the supplied key if the key length is
				 * acceptable and the rss_key is not NULL
				 */
				if (rss->rss_key &&
				    rss->rss_key_len <= HW_HASH_KEY_SIZE)
					memcpy(vnic->rss_hash_key,
					       rss->rss_key,
					       rss->rss_key_len);
			}
		}
	}

	return rc;

err_out:
	/* Free allocated vnic/filters */

	return rc;
}
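
/*
 * Return every mbuf still held by a queue to its mempool: the normal Rx
 * ring, the aggregation ring used for scattered/jumbo Rx, and any buffers
 * parked in the TPA (LRO) reassembly slots.
 */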
static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
	struct bnxt_sw_rx_bd *sw_ring;
	struct bnxt_tpa_info *tpa_info;
	uint16_t i;

	if (!rxq)
		return;

	sw_ring = rxq->rx_ring->rx_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
			if (sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(sw_ring[i].mbuf);
				sw_ring[i].mbuf = NULL;
			}
		}
	}

	/* Free up mbufs in Agg ring */
	sw_ring = rxq->rx_ring->ag_buf_ring;
	if (sw_ring) {
		for (i = 0;
		     i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
			if (sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(sw_ring[i].mbuf);
				sw_ring[i].mbuf = NULL;
			}
		}
	}

	/* Free up mbufs in TPA */
	tpa_info = rxq->rx_ring->tpa_info;
	if (tpa_info) {
		for (i = 0; i < BNXT_TPA_MAX; i++) {
			if (tpa_info[i].mbuf) {
				rte_pktmbuf_free_seg(tpa_info[i].mbuf);
				tpa_info[i].mbuf = NULL;
			}
		}
	}
}
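
/* Walk every Rx queue on the port and release its mbufs. */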
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}
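
/*
 * rx_queue_release callback for the rte_eth_dev ops: free the mbufs, the
 * hardware descriptor rings (Rx, aggregation, completion), the stats
 * context, and finally the queue structure itself.
 */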
void bnxt_rx_queue_release_op(void *rx_queue)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

	if (rxq) {
		bnxt_rx_queue_release_mbufs(rxq);

		/* Free RX ring hardware descriptors */
		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		/* Free RX Agg ring hardware descriptors */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);

		/* Free RX completion ring hardware descriptors */
		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);

		bnxt_free_rxq_stats(rxq);
		rte_memzone_free(rxq->mz);
		rxq->mz = NULL;

		rte_free(rxq);
	}
}
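
/*
 * rx_queue_setup callback: validate the requested ring size, allocate the
 * queue structure on the caller's NUMA socket, size the ring structures,
 * and reserve the DMA memory for the hardware descriptor rings.
 */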
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
			       uint16_t queue_idx,
			       uint16_t nb_desc,
			       unsigned int socket_id,
			       const struct rte_eth_rxconf *rx_conf,
			       struct rte_mempool *mp)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	int rc = 0;

	if (queue_idx >= bp->max_rx_rings) {
		PMD_DRV_LOG(ERR,
			"Cannot create Rx ring %d. Only %d rings available\n",
			queue_idx, bp->max_rx_rings);
		return -EINVAL;
	}

	if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
		rc = -EINVAL;
		goto out;
	}

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(rxq);
	}
	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
		rc = -ENOMEM;
		goto out;
	}
	rxq->bp = bp;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;

	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_use_size);
	PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc)
		goto out;

	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	rxq->crc_len = rte_eth_dev_must_keep_crc(rx_offloads) ?
		ETHER_CRC_LEN : 0;

	eth_dev->data->rx_queues[queue_idx] = rxq;
	/* Allocate RX ring hardware descriptors */
	if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
			"rxr")) {
		PMD_DRV_LOG(ERR,
			"ring_dma_zone_reserve for rx_ring failed!\n");
		bnxt_rx_queue_release_op(rxq);
		rc = -ENOMEM;
		goto out;
	}
	rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);

out:
	return rc;
}
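
/*
 * Rx interrupt enable/disable: when Rx interrupts are configured, each Rx
 * queue's completion ring doorbell can arm (request an interrupt on the
 * next completion) or disarm the ring.
 */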
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = -EINVAL;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return rc;

		/* Arm the completion ring doorbell to raise an interrupt */
		cpr = rxq->cp_ring;
		B_CP_DB_ARM(cpr);
		rc = 0;
	}
	return rc;
}
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = -EINVAL;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return rc;

		/* Disarm the doorbell so no further interrupts fire */
		cpr = rxq->cp_ring;
		B_CP_DB_DISARM(cpr);
		rc = 0;
	}
	return rc;
}
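
/*
 * rx_queue_start callback: mark the queue started and, when RSS is active,
 * put the queue's ring group back into its VNIC's RSS table so the
 * hardware resumes steering packets to it.
 */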
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
	struct bnxt_vnic_info *vnic = NULL;

	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq->rx_deferred_start = false;
	PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		vnic = rxq->vnic;
		if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
			return 0;
		PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
			vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
		/* Rx queue i uses ring group i + 1 */
		vnic->fw_grp_ids[rx_queue_id] =
				bp->grp_info[rx_queue_id + 1].fw_grp_id;
		return bnxt_vnic_rss_configure(bp, vnic);
	}

	return 0;
}
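
/*
 * rx_queue_stop callback: the inverse of start; mark the queue stopped and
 * pull its ring group out of the VNIC's RSS table so the hardware stops
 * steering traffic to a dead queue.
 */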
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
	struct bnxt_vnic_info *vnic = NULL;

	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
		return -EINVAL;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	rxq->rx_deferred_start = true;
	PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		vnic = rxq->vnic;
		vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
		return bnxt_vnic_rss_configure(bp, vnic);
	}
	return 0;
}