/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "bnxt_ring.h"
#include "bnxt_reps.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_hwrm.h"
#include "hsi_struct_def_dpdk.h"

static const struct eth_dev_ops bnxt_vf_rep_dev_ops = {
	.dev_infos_get = bnxt_vf_rep_dev_info_get_op,
	.dev_configure = bnxt_vf_rep_dev_configure_op,
	.dev_start = bnxt_vf_rep_dev_start_op,
	.rx_queue_setup = bnxt_vf_rep_rx_queue_setup_op,
	.rx_queue_release = bnxt_vf_rep_rx_queue_release_op,
	.tx_queue_setup = bnxt_vf_rep_tx_queue_setup_op,
	.tx_queue_release = bnxt_vf_rep_tx_queue_release_op,
	.link_update = bnxt_vf_rep_link_update_op,
	.dev_close = bnxt_vf_rep_dev_close_op,
	.dev_stop = bnxt_vf_rep_dev_stop_op,
	.stats_get = bnxt_vf_rep_stats_get_op,
	.stats_reset = bnxt_vf_rep_stats_reset_op,
};

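/* Glue between the parent port's Rx path and a representor port: when a
 * received packet's CFA code maps to a VF representor, the parent hands
 * the mbuf here instead of delivering it on its own queue, and the mbuf
 * is parked on the representor's software Rx ring until
 * bnxt_vf_rep_rx_burst() drains it.
 */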
void
bnxt_vfr_recv(struct bnxt *bp, uint16_t cfa_code, uint16_t queue_id,
	      struct rte_mbuf *mbuf)
{
	struct bnxt_sw_rx_bd *prod_rx_buf;
	struct bnxt_rx_ring_info *rep_rxr;
	struct bnxt_rx_queue *rep_rxq;
	struct rte_eth_dev *vfr_eth_dev;
	struct bnxt_vf_representor *vfr_bp;
	uint16_t vf_id, que, mask;

	vf_id = bp->cfa_code_map[cfa_code];
	/* cfa_code is invalid OR vf_id > MAX REP. Assume normal Rx */
	if (vf_id == BNXT_VF_IDX_INVALID || vf_id > BNXT_MAX_VF_REPS)
		return;

	vfr_eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
	if (!vfr_eth_dev)
		return;

	vfr_bp = vfr_eth_dev->data->dev_private;
	if (vfr_bp->rx_cfa_code != cfa_code) {
		/* cfa_code not meant for this VF rep!!?? */
		return;
	}

	/* If rxq_id happens to be > max rep_queue, use rxq0 */
	que = queue_id < BNXT_MAX_VF_REP_RINGS ? queue_id : 0;
	rep_rxq = vfr_bp->rx_queues[que];
	rep_rxr = rep_rxq->rx_ring;
	mask = rep_rxr->rx_ring_struct->ring_mask;

	/* Put this mbuf on the RxQ of the Representor */
	prod_rx_buf =
		&rep_rxr->rx_buf_ring[rep_rxr->rx_prod++ & mask];
	if (!prod_rx_buf->mbuf) {
		prod_rx_buf->mbuf = mbuf;
		vfr_bp->rx_bytes[que] += mbuf->pkt_len;
		vfr_bp->rx_pkts[que]++;
	} else {
		vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len;
		vfr_bp->rx_drop_pkts[que]++;
		rte_pktmbuf_free(mbuf); /* Representor Rx ring full, drop pkt */
	}
}

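/* Representor Rx burst: no hardware access here; it simply drains the
 * mbufs that bnxt_vfr_recv() queued on this representor's software ring
 * and stamps them with the representor's port id.
 */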
static uint16_t
bnxt_vf_rep_rx_burst(void *rx_queue,
		     struct rte_mbuf **rx_pkts,
		     uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_sw_rx_bd *cons_rx_buf;
	struct bnxt_rx_ring_info *rxr;
	uint16_t nb_rx_pkts = 0;
	uint16_t mask, i;

	if (!rxq)
		return 0;

	rxr = rxq->rx_ring;
	mask = rxr->rx_ring_struct->ring_mask;
	for (i = 0; i < nb_pkts; i++) {
		cons_rx_buf = &rxr->rx_buf_ring[rxr->rx_cons & mask];
		if (!cons_rx_buf->mbuf)
			break;
		rx_pkts[nb_rx_pkts] = cons_rx_buf->mbuf;
		rx_pkts[nb_rx_pkts]->port = rxq->port_id;
		cons_rx_buf->mbuf = NULL;
		nb_rx_pkts++;
		rxr->rx_cons++;
	}

	return nb_rx_pkts;
}

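/* Representor Tx burst: transmission is proxied through the parent
 * device's Tx queue of the same index. The parent's vfr_lock serializes
 * representor ports sharing that queue, and tx_cfa_action is set for the
 * duration of the burst so the parent's transmit path tags the packets
 * with this representor's CFA action (which steers them to the
 * represented VF).
 */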
static uint16_t
bnxt_vf_rep_tx_burst(void *tx_queue,
		     struct rte_mbuf **tx_pkts,
		     uint16_t nb_pkts)
{
	struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
	struct bnxt_tx_queue *ptxq;
	struct bnxt *parent;
	struct bnxt_vf_representor *vf_rep_bp;
	uint16_t rc;
	int qid;
	int i;

	if (!vfr_txq)
		return 0;

	qid = vfr_txq->txq->queue_id;
	vf_rep_bp = vfr_txq->bp;
	parent = vf_rep_bp->parent_dev->data->dev_private;
	pthread_mutex_lock(&parent->rep_info->vfr_lock);
	ptxq = parent->tx_queues[qid];

	ptxq->tx_cfa_action = vf_rep_bp->tx_cfa_action;

	for (i = 0; i < nb_pkts; i++) {
		vf_rep_bp->tx_bytes[qid] += tx_pkts[i]->pkt_len;
		vf_rep_bp->tx_pkts[qid]++;
	}

	rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
	ptxq->tx_cfa_action = 0;
	pthread_mutex_unlock(&parent->rep_info->vfr_lock);

	return rc;
}

int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)
{
	struct bnxt_vf_representor *vf_rep_bp = eth_dev->data->dev_private;
	struct bnxt_vf_representor *rep_params =
			(struct bnxt_vf_representor *)params;
	struct rte_eth_link *link;
	struct bnxt *parent_bp;
	int rc = 0;

	vf_rep_bp->vf_id = rep_params->vf_id;
	vf_rep_bp->switch_domain_id = rep_params->switch_domain_id;
	vf_rep_bp->parent_dev = rep_params->parent_dev;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	eth_dev->data->representor_id = rep_params->vf_id;

	rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
	memcpy(vf_rep_bp->mac_addr, vf_rep_bp->dflt_mac_addr,
	       sizeof(vf_rep_bp->mac_addr));
	eth_dev->data->mac_addrs =
		(struct rte_ether_addr *)&vf_rep_bp->mac_addr;
	eth_dev->dev_ops = &bnxt_vf_rep_dev_ops;

	/* No data-path, but need stub Rx/Tx functions to avoid crash
	 * when testing with ovs-dpdk
	 */
	eth_dev->rx_pkt_burst = bnxt_vf_rep_rx_burst;
	eth_dev->tx_pkt_burst = bnxt_vf_rep_tx_burst;

	/* Link state. Inherited from PF or trusted VF */
	parent_bp = vf_rep_bp->parent_dev->data->dev_private;
	link = &parent_bp->eth_dev->data->dev_link;

	eth_dev->data->dev_link.link_speed = link->link_speed;
	eth_dev->data->dev_link.link_duplex = link->link_duplex;
	eth_dev->data->dev_link.link_status = link->link_status;
	eth_dev->data->dev_link.link_autoneg = link->link_autoneg;

	vf_rep_bp->fw_fid = rep_params->vf_id + parent_bp->first_vf_id;
	PMD_DRV_LOG(INFO, "vf_rep->fw_fid = %d\n", vf_rep_bp->fw_fid);
	rc = bnxt_hwrm_get_dflt_vnic_svif(parent_bp, vf_rep_bp->fw_fid,
					  &vf_rep_bp->dflt_vnic_id,
					  &vf_rep_bp->svif);
	if (rc)
		PMD_DRV_LOG(ERR, "Failed to get default vnic id of VF\n");
	else
		PMD_DRV_LOG(INFO, "vf_rep->dflt_vnic_id = %d\n",
			    vf_rep_bp->dflt_vnic_id);

	PMD_DRV_LOG(INFO, "calling bnxt_print_link_info\n");
	bnxt_print_link_info(eth_dev);

	/* Tell rte_eth_dev_close() that it should also release the
	 * private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
	PMD_DRV_LOG(INFO,
		    "Switch domain id %d: Representor Device %d init done\n",
		    vf_rep_bp->switch_domain_id, vf_rep_bp->vf_id);

	return 0;
}

int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
	struct bnxt *parent_bp;
	struct bnxt_vf_representor *rep =
		(struct bnxt_vf_representor *)eth_dev->data->dev_private;
	uint16_t vf_id;

	eth_dev->data->mac_addrs = NULL;
	eth_dev->dev_ops = NULL;

	parent_bp = rep->parent_dev->data->dev_private;
	if (!parent_bp)
		return 0;

	parent_bp->num_reps--;
	vf_id = rep->vf_id;
	/* Mark that this representor has been freed */
	if (parent_bp->rep_info)
		memset(&parent_bp->rep_info[vf_id], 0,
		       sizeof(parent_bp->rep_info[vf_id]));

	return 0;
}

int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
{
	struct bnxt *parent_bp;
	struct bnxt_vf_representor *rep =
		(struct bnxt_vf_representor *)eth_dev->data->dev_private;
	struct rte_eth_link *link;
	int rc;

	parent_bp = rep->parent_dev->data->dev_private;
	rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);

	/* Link state. Inherited from PF or trusted VF */
	link = &parent_bp->eth_dev->data->dev_link;

	eth_dev->data->dev_link.link_speed = link->link_speed;
	eth_dev->data->dev_link.link_duplex = link->link_duplex;
	eth_dev->data->dev_link.link_status = link->link_status;
	eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
	bnxt_print_link_info(eth_dev);

	return rc;
}

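/* Allocate the CFA rules for this representor in firmware (currently via
 * an HWRM command; see the note below about moving to the TFLIB ULP API)
 * and register the representor's Rx CFA code in the parent's cfa_code_map
 * so bnxt_vfr_recv() can route packets back to this port.
 */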
static int bnxt_vfr_alloc(struct bnxt_vf_representor *vfr)
{
	struct bnxt *parent_bp;
	int rc;

	if (!vfr || !vfr->parent_dev) {
		PMD_DRV_LOG(ERR, "No memory allocated for representor\n");
		return -ENOMEM;
	}
	parent_bp = vfr->parent_dev->data->dev_private;

	/* Check if representor has been already allocated in FW */
	if (vfr->tx_cfa_action && vfr->rx_cfa_code)
		return 0;

	/* Alloc VF rep rules in CFA after the default VNIC is created.
	 * Otherwise the FW will create the VF-rep rules with a
	 * default drop action.
	 *
	 * This is where we need to replace invoking an HWRM cmd
	 * with the new TFLIB ULP API to do more/less the same job.
	 */
	rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp, vfr->vf_id);
	if (!rc) {
		parent_bp->cfa_code_map[vfr->rx_cfa_code] = vfr->vf_id;
		PMD_DRV_LOG(DEBUG, "allocated representor %d in FW\n",
			    vfr->vf_id);
	} else {
		PMD_DRV_LOG(ERR, "Failed to alloc representor %d in FW\n",
			    vfr->vf_id);
	}

	return rc;
}

static void bnxt_vf_rep_free_rx_mbufs(struct bnxt_vf_representor *rep_bp)
{
	struct bnxt_rx_queue *rxq;
	unsigned int i;

	for (i = 0; i < rep_bp->rx_nr_rings; i++) {
		rxq = rep_bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}

int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	int rc;

	rc = bnxt_vfr_alloc(rep_bp);
	if (!rc) {
		eth_dev->rx_pkt_burst = &bnxt_vf_rep_rx_burst;
		eth_dev->tx_pkt_burst = &bnxt_vf_rep_tx_burst;
		bnxt_vf_rep_link_update_op(eth_dev, 1);
	} else {
		eth_dev->data->dev_link.link_status = 0;
		bnxt_vf_rep_free_rx_mbufs(rep_bp);
	}

	return rc;
}

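/* Undo bnxt_vfr_alloc(): free the representor's CFA rules in firmware and
 * invalidate its entry in the parent's cfa_code_map.
 */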
static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
{
	struct bnxt *parent_bp;
	int rc;

	if (!vfr || !vfr->parent_dev) {
		PMD_DRV_LOG(ERR, "No memory allocated for representor\n");
		return -ENOMEM;
	}
	parent_bp = vfr->parent_dev->data->dev_private;

	/* Check if representor has been already freed in FW */
	if (!vfr->tx_cfa_action && !vfr->rx_cfa_code)
		return 0;

	/* This is where we need to replace invoking an HWRM cmd
	 * with the new TFLIB ULP API to do more/less the same job.
	 */
	rc = bnxt_hwrm_cfa_vfr_free(parent_bp, vfr->vf_id);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to free representor %d in FW\n",
			    vfr->vf_id);
		return rc;
	}

	parent_bp->cfa_code_map[vfr->rx_cfa_code] = BNXT_VF_IDX_INVALID;
	PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n", vfr->vf_id);
	vfr->tx_cfa_action = 0;
	vfr->rx_cfa_code = 0;

	return rc;
}

void bnxt_vf_rep_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt_vf_representor *vfr_bp = eth_dev->data->dev_private;

	/* Avoid crashes as we are about to free queues */
	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

	bnxt_vfr_free(vfr_bp);

	if (eth_dev->data->dev_started)
		eth_dev->data->dev_link.link_status = 0;

	bnxt_vf_rep_free_rx_mbufs(vfr_bp);
}

void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev)
{
	bnxt_vf_representor_uninit(eth_dev);
}

int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
				struct rte_eth_dev_info *dev_info)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	struct bnxt *parent_bp;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;
	int rc = 0;

	/* MAC Specifics */
	parent_bp = rep_bp->parent_dev->data->dev_private;
	if (!parent_bp) {
		PMD_DRV_LOG(ERR, "Rep parent NULL!\n");
		return rc;
	}
	PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n");
	dev_info->max_mac_addrs = parent_bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	max_rx_rings = BNXT_MAX_VF_REP_RINGS;
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
	dev_info->hash_key_size = 40;
	max_vnics = parent_bp->max_vnics;

	/* MTU specifics */
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = BNXT_MAX_MTU;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
	if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = 32,
		/* If no descriptors available, pkts are dropped by default */
		.rx_drop_en = 1,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = 32,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	eth_dev->data->dev_conf.intr_conf.rxq = 1;
	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;

found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;

	return 0;
}

int bnxt_vf_rep_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;

	PMD_DRV_LOG(DEBUG, "Representor dev_configure_op\n");
	rep_bp->rx_queues = (void *)eth_dev->data->rx_queues;
	rep_bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	rep_bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	return 0;
}

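/* Representor Rx queues are purely software constructs: no mempool or
 * hardware ring is programmed here. The queue must mirror the parent Rx
 * queue's descriptor count, and only a software buffer ring is allocated
 * for bnxt_vfr_recv()/bnxt_vf_rep_rx_burst() to hand mbufs through.
 */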
int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
				  uint16_t queue_idx,
				  uint16_t nb_desc,
				  unsigned int socket_id,
				  __rte_unused const struct rte_eth_rxconf *rx_conf,
				  __rte_unused struct rte_mempool *mp)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
	struct bnxt_rx_queue *parent_rxq;
	struct bnxt_rx_queue *rxq;
	struct bnxt_sw_rx_bd *buf_ring;
	int rc = 0;

	if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
		PMD_DRV_LOG(ERR,
			    "Cannot create Rx ring %d. %d rings available\n",
			    queue_idx, BNXT_MAX_VF_REP_RINGS);
		return -EINVAL;
	}

	if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
		return -EINVAL;
	}

	parent_rxq = parent_bp->rx_queues[queue_idx];
	if (!parent_rxq) {
		PMD_DRV_LOG(ERR, "Parent RxQ has not been configured yet\n");
		return -EINVAL;
	}

	if (nb_desc != parent_rxq->nb_rx_desc) {
		PMD_DRV_LOG(ERR, "nb_desc %d does not match parent rxq\n",
			    nb_desc);
		return -EINVAL;
	}

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(rxq);
	}

	rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
				 sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_DRV_LOG(ERR, "bnxt_vfr_rx_queue allocation failed!\n");
		return -ENOMEM;
	}

	rxq->nb_rx_desc = nb_desc;
	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc)
		goto out;

	buf_ring = rte_zmalloc_socket("bnxt_rx_vfr_buf_ring",
				      sizeof(struct bnxt_sw_rx_bd) *
				      rxq->rx_ring->rx_ring_struct->ring_size,
				      RTE_CACHE_LINE_SIZE, socket_id);
	if (!buf_ring) {
		PMD_DRV_LOG(ERR, "bnxt_rx_vfr_buf_ring allocation failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	rxq->rx_ring->rx_buf_ring = buf_ring;
	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	eth_dev->data->rx_queues[queue_idx] = rxq;

	return 0;

out:
	if (rxq)
		bnxt_rx_queue_release_op(rxq);

	return rc;
}

void bnxt_vf_rep_rx_queue_release_op(void *rx_queue)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

	if (!rxq)
		return;

	bnxt_rx_queue_release_mbufs(rxq);
	bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
	bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
	bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
	rte_free(rxq);
}

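/* Representor Tx queues are thin wrappers (bnxt_vf_rep_tx_queue) around a
 * queue id and the representor itself; bnxt_vf_rep_tx_burst() uses them to
 * locate the parent Tx queue of the same index.
 */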
int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
				  uint16_t queue_idx,
				  uint16_t nb_desc,
				  unsigned int socket_id,
				  __rte_unused const struct rte_eth_txconf *tx_conf)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
	struct bnxt_tx_queue *parent_txq, *txq;
	struct bnxt_vf_rep_tx_queue *vfr_txq;

	if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
		PMD_DRV_LOG(ERR,
			    "Cannot create Tx ring %d. %d rings available\n",
			    queue_idx, BNXT_MAX_VF_REP_RINGS);
		return -EINVAL;
	}

	if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
		return -EINVAL;
	}

	parent_txq = parent_bp->tx_queues[queue_idx];
	if (!parent_txq) {
		PMD_DRV_LOG(ERR, "Parent TxQ has not been configured yet\n");
		return -EINVAL;
	}

	if (nb_desc != parent_txq->nb_tx_desc) {
		PMD_DRV_LOG(ERR, "nb_desc %d does not match parent txq\n",
			    nb_desc);
		return -EINVAL;
	}

	if (eth_dev->data->tx_queues) {
		vfr_txq = eth_dev->data->tx_queues[queue_idx];
		if (vfr_txq)
			bnxt_vf_rep_tx_queue_release_op(vfr_txq);
	}

	vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
				     sizeof(struct bnxt_vf_rep_tx_queue),
				     RTE_CACHE_LINE_SIZE, socket_id);
	if (!vfr_txq) {
		PMD_DRV_LOG(ERR, "bnxt_vfr_tx_queue allocation failed!\n");
		return -ENOMEM;
	}
	txq = rte_zmalloc_socket("bnxt_tx_queue",
				 sizeof(struct bnxt_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!txq) {
		PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!\n");
		rte_free(vfr_txq);
		return -ENOMEM;
	}

	txq->nb_tx_desc = nb_desc;
	txq->queue_id = queue_idx;
	txq->port_id = eth_dev->data->port_id;
	vfr_txq->txq = txq;
	vfr_txq->bp = rep_bp;
	eth_dev->data->tx_queues[queue_idx] = vfr_txq;

	return 0;
}

void bnxt_vf_rep_tx_queue_release_op(void *tx_queue)
{
	struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;

	if (!vfr_txq || !vfr_txq->txq)
		return;
	rte_free(vfr_txq->txq);
	rte_free(vfr_txq);
}

int bnxt_vf_rep_stats_get_op(struct rte_eth_dev *eth_dev,
			     struct rte_eth_stats *stats)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	int i;

	memset(stats, 0, sizeof(*stats));
	for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
		stats->obytes += rep_bp->tx_bytes[i];
		stats->opackets += rep_bp->tx_pkts[i];
		stats->ibytes += rep_bp->rx_bytes[i];
		stats->ipackets += rep_bp->rx_pkts[i];
		stats->imissed += rep_bp->rx_drop_pkts[i];

		stats->q_ipackets[i] = rep_bp->rx_pkts[i];
		stats->q_ibytes[i] = rep_bp->rx_bytes[i];
		stats->q_opackets[i] = rep_bp->tx_pkts[i];
		stats->q_obytes[i] = rep_bp->tx_bytes[i];
		stats->q_errors[i] = rep_bp->rx_drop_pkts[i];
	}

	return 0;
}

int bnxt_vf_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
	int i;

	for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
		rep_bp->tx_pkts[i] = 0;
		rep_bp->tx_bytes[i] = 0;
		rep_bp->rx_pkts[i] = 0;
		rep_bp->rx_bytes[i] = 0;
		rep_bp->rx_drop_pkts[i] = 0;
	}

	return 0;
}