/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
    "Broadcom Cumulus driver " DRV_MODULE_NAME "\n";

static struct rte_pci_id bnxt_pci_id_map[] = {
#define RTE_PCI_DEV_ID_DECL_BNXT(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
    {.device_id = 0},
};

#define BNXT_ETH_RSS_SUPPORT (  \
    ETH_RSS_IPV4 |              \
    ETH_RSS_NONFRAG_IPV4_TCP |  \
    ETH_RSS_NONFRAG_IPV4_UDP |  \
    ETH_RSS_IPV6 |              \
    ETH_RSS_NONFRAG_IPV6_TCP |  \
    ETH_RSS_NONFRAG_IPV6_UDP)

/***********************/

/*
 * High level utility functions
 */

static void bnxt_free_mem(struct bnxt *bp)
{
    bnxt_free_filter_mem(bp);
    bnxt_free_vnic_attributes(bp);
    bnxt_free_vnic_mem(bp);

    bnxt_free_stats(bp);
    bnxt_free_tx_rings(bp);
    bnxt_free_rx_rings(bp);
    bnxt_free_def_cp_ring(bp);
}

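/*
 * Allocate the driver-private memory pieces in dependency order: the
 * default completion ring first, then the VNIC and filter bookkeeping.
 * Any failure unwinds through bnxt_free_mem(), which tolerates a
 * partially allocated device.
 */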
static int bnxt_alloc_mem(struct bnxt *bp)
{
    int rc;

    /* Default completion ring */
    rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
    if (rc)
        goto alloc_mem_err;

    rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
                          bp->def_cp_ring, "def_cp");
    if (rc)
        goto alloc_mem_err;

    rc = bnxt_alloc_vnic_mem(bp);
    if (rc)
        goto alloc_mem_err;

    rc = bnxt_alloc_vnic_attributes(bp);
    if (rc)
        goto alloc_mem_err;

    rc = bnxt_alloc_filter_mem(bp);
    if (rc)
        goto alloc_mem_err;

    return 0;

alloc_mem_err:
    bnxt_free_mem(bp);
    return rc;
}

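/*
 * Program the chip through the HWRM channel.  The ordering matters:
 * statistics contexts, rings, and ring groups must exist before the
 * VNICs that reference them can be allocated and configured, and the
 * RX mask is set on the default VNIC last.
 */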
static int bnxt_init_chip(struct bnxt *bp)
{
    unsigned int i, rss_idx, fw_idx;
    int rc;

    rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
    if (rc) {
        RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
        goto err_out;
    }

    rc = bnxt_alloc_hwrm_rings(bp);
    if (rc) {
        RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
        goto err_out;
    }

    rc = bnxt_alloc_all_hwrm_ring_grps(bp);
    if (rc) {
        RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
        goto err_out;
    }

    rc = bnxt_mq_rx_configure(bp);
    if (rc) {
        RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
        goto err_out;
    }

    /* VNIC configuration */
    for (i = 0; i < bp->nr_vnics; i++) {
        struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

        rc = bnxt_hwrm_vnic_alloc(bp, vnic);
        if (rc) {
            RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n",
                rc);
            goto err_out;
        }

        rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
        if (rc) {
            RTE_LOG(ERR, PMD,
                "HWRM vnic ctx alloc failure rc: %x\n", rc);
            goto err_out;
        }

        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
        if (rc) {
            RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc);
            goto err_out;
        }

        rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
        if (rc) {
            RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n",
                rc);
            goto err_out;
        }
        if (vnic->rss_table && vnic->hash_type) {
            /*
             * Fill the RSS hash & redirection table with
             * ring group ids for all VNICs
             */
            for (rss_idx = 0, fw_idx = 0;
                 rss_idx < HW_HASH_INDEX_SIZE;
                 rss_idx++, fw_idx++) {
                if (vnic->fw_grp_ids[fw_idx] ==
                    INVALID_HW_RING_ID)
                    fw_idx = 0;
                vnic->rss_table[rss_idx] =
                        vnic->fw_grp_ids[fw_idx];
            }
            rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
            if (rc) {
                RTE_LOG(ERR, PMD,
                    "HWRM vnic set RSS failure rc: %x\n",
                    rc);
                goto err_out;
            }
        }
    }
    rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
    if (rc) {
        RTE_LOG(ERR, PMD,
            "HWRM cfa l2 rx mask failure rc: %x\n", rc);
        goto err_out;
    }

    return 0;

err_out:
    bnxt_free_all_hwrm_resources(bp);
    return rc;
}

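/*
 * Tear down what bnxt_init_nic() created: firmware-side resources
 * first, then the local filter and VNIC lists.
 */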
static int bnxt_shutdown_nic(struct bnxt *bp)
{
    bnxt_free_all_hwrm_resources(bp);
    bnxt_free_all_filters(bp);
    bnxt_free_all_vnics(bp);
    return 0;
}

static int bnxt_init_nic(struct bnxt *bp)
{
    int rc;

    bnxt_init_ring_grps(bp);
    bnxt_init_vnics(bp);
    bnxt_init_filters(bp);

    rc = bnxt_init_chip(bp);
    if (rc)
        return rc;

    return 0;
}

/*
 * Device configuration and status function
 */

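/*
 * Report device capabilities.  Queue and RSS context limits come from
 * the function's HWRM capabilities, so a PF and a VF report different
 * maximums; the VMDq pool/queue sizing below is derived from whatever
 * VNIC and RX queue budget remains.
 */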
static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
                 struct rte_eth_dev_info *dev_info)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    uint16_t max_vnics, i, j, vpool, vrxq;

    /* MAC Specifics */
    dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
    dev_info->max_hash_mac_addrs = 0;

    /* PF/VF specifics */
    if (BNXT_PF(bp)) {
        dev_info->max_rx_queues = bp->pf.max_rx_rings;
        dev_info->max_tx_queues = bp->pf.max_tx_rings;
        dev_info->max_vfs = bp->pf.active_vfs;
        dev_info->reta_size = bp->pf.max_rsscos_ctx;
        max_vnics = bp->pf.max_vnics;
    } else {
        dev_info->max_rx_queues = bp->vf.max_rx_rings;
        dev_info->max_tx_queues = bp->vf.max_tx_rings;
        dev_info->reta_size = bp->vf.max_rsscos_ctx;
        max_vnics = bp->vf.max_vnics;
    }

    /* Fast path specifics */
    dev_info->min_rx_bufsize = 1;
    dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
                  + VLAN_TAG_SIZE;
    dev_info->rx_offload_capa = 0;
    dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
                    DEV_TX_OFFLOAD_TCP_CKSUM |
                    DEV_TX_OFFLOAD_UDP_CKSUM |
                    DEV_TX_OFFLOAD_TCP_TSO;

    dev_info->default_rxconf = (struct rte_eth_rxconf) {
        .rx_thresh = {
            .pthresh = 8,
            .hthresh = 8,
            .wthresh = 0,
        },
        .rx_free_thresh = 32,
        .rx_drop_en = 0,
    };

    dev_info->default_txconf = (struct rte_eth_txconf) {
        .tx_thresh = {
            .pthresh = 32,
            .hthresh = 0,
            .wthresh = 0,
        },
        .tx_free_thresh = 32,
        .tx_rs_thresh = 32,
        .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                 ETH_TXQ_FLAGS_NOOFFLOADS,
    };

    /*
     * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
     *       need further investigation.
     */

    /* VMDq resources */
    vpool = 64; /* ETH_64_POOLS */
    vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
    for (i = 0; i < 4; vpool >>= 1, i++) {
        if (max_vnics > vpool) {
            for (j = 0; j < 5; vrxq >>= 1, j++) {
                if (dev_info->max_rx_queues > vrxq) {
                    if (vpool > vrxq)
                        vpool = vrxq;
                    goto found;
                }
            }
            /* Not enough resources to support VMDq */
            break;
        }
    }
    /* Not enough resources to support VMDq */
    vpool = 0;
    vrxq = 0;
found:
    dev_info->max_vmdq_pools = vpool;
    dev_info->vmdq_queue_num = vrxq;

    dev_info->vmdq_pool_base = 0;
    dev_info->vmdq_queue_base = 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    int rc;

    bp->rx_queues = (void *)eth_dev->data->rx_queues;
    bp->tx_queues = (void *)eth_dev->data->tx_queues;

    /* Inherit new configurations */
    bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
    bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
    bp->rx_cp_nr_rings = bp->rx_nr_rings;
    bp->tx_cp_nr_rings = bp->tx_nr_rings;

    if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
        eth_dev->data->mtu =
            eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
            ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
    rc = bnxt_set_hwrm_link_config(bp, true);
    return rc;
}

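/*
 * Start the port: reset the function in firmware, allocate the
 * driver-private memory, then bring up the rings and VNICs.  Any
 * failure rolls everything back so the port is left stopped.
 */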
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    int rc;

    rc = bnxt_hwrm_func_reset(bp);
    if (rc) {
        RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
        rc = -EIO;
        goto error;
    }

    rc = bnxt_alloc_mem(bp);
    if (rc)
        goto error;

    rc = bnxt_init_nic(bp);
    if (rc)
        goto error;

    return 0;

error:
    bnxt_shutdown_nic(bp);
    bnxt_free_tx_mbufs(bp);
    bnxt_free_rx_mbufs(bp);
    bnxt_free_mem(bp);
    return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

    eth_dev->data->dev_link.link_status = 1;
    bnxt_set_hwrm_link_config(bp, true);
    return 0;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

    eth_dev->data->dev_link.link_status = 0;
    bnxt_set_hwrm_link_config(bp, false);
    return 0;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

    bnxt_free_tx_mbufs(bp);
    bnxt_free_rx_mbufs(bp);
    bnxt_free_mem(bp);
    rte_free(eth_dev->data->mac_addrs);
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

    if (bp->eth_dev->data->dev_started) {
        /* TBD: STOP HW queues DMA */
        eth_dev->data->dev_link.link_status = 0;
    }
    bnxt_shutdown_nic(bp);
}

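/*
 * A MAC address can be attached to more than one filter flow pool;
 * pool_mask records which pools hold a filter for this index, so
 * removal walks every flagged pool and clears the matching filters
 * both in firmware and in the local lists.
 */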
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
                    uint32_t index)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
    struct bnxt_vnic_info *vnic;
    struct bnxt_filter_info *filter, *temp_filter;
    int i;

    /*
     * Loop through all VNICs from the specified filter flow pools to
     * remove the corresponding MAC addr filter
     */
    for (i = 0; i < MAX_FF_POOLS; i++) {
        if (!(pool_mask & (1ULL << i)))
            continue;

        STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
            filter = STAILQ_FIRST(&vnic->filter);
            while (filter) {
                temp_filter = STAILQ_NEXT(filter, next);
                if (filter->mac_index == index) {
                    STAILQ_REMOVE(&vnic->filter, filter,
                              bnxt_filter_info, next);
                    bnxt_hwrm_clear_filter(bp, filter);
                    filter->mac_index = INVALID_MAC_INDEX;
                    memset(&filter->l2_addr, 0,
                           ETHER_ADDR_LEN);
                    STAILQ_INSERT_TAIL(
                            &bp->free_filter_list,
                            filter, next);
                }
                filter = temp_filter;
            }
        }
    }
}

static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
                 struct ether_addr *mac_addr,
                 uint32_t index, uint32_t pool)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
    struct bnxt_filter_info *filter;

    if (!vnic) {
        RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
        return;
    }
    /* Attach requested MAC address to the new l2_filter */
    STAILQ_FOREACH(filter, &vnic->filter, next) {
        if (filter->mac_index == index) {
            RTE_LOG(ERR, PMD,
                "MAC addr already exists for pool %d\n", pool);
            return;
        }
    }
    filter = bnxt_alloc_filter(bp);
    if (!filter) {
        RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
        return;
    }
    STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
    filter->mac_index = index;
    memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
    bnxt_hwrm_set_filter(bp, vnic, filter);
}

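/*
 * Query link state.  With wait_to_complete set, poll the firmware up
 * to BNXT_LINK_WAIT_CNT times, sleeping BNXT_LINK_WAIT_INTERVAL ms
 * between attempts, since the PHY may still be autonegotiating when
 * the application asks.
 */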
static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
                   int wait_to_complete)
{
    int rc = 0;
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    struct rte_eth_link new;
    unsigned int cnt = BNXT_LINK_WAIT_CNT;

    memset(&new, 0, sizeof(new));
    do {
        /* Retrieve link info from hardware */
        rc = bnxt_get_hwrm_link_config(bp, &new);
        if (rc) {
            new.link_speed = ETH_LINK_SPEED_100M;
            new.link_duplex = ETH_LINK_FULL_DUPLEX;
            RTE_LOG(ERR, PMD,
                "Failed to retrieve link rc = 0x%x!", rc);
            goto out;
        }
        if (!wait_to_complete)
            break;

        rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
    } while (!new.link_status && cnt--);

    /* Timed out or success */
    if (new.link_status) {
        /* Update only if success */
        eth_dev->data->dev_link.link_duplex = new.link_duplex;
        eth_dev->data->dev_link.link_speed = new.link_speed;
    }
    eth_dev->data->dev_link.link_status = new.link_status;
out:
    return rc;
}

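/*
 * Promiscuous and allmulticast modes are implemented as flag updates
 * on the default VNIC (vnic_info[0]) followed by an HWRM call that
 * reprograms the RX mask in firmware.
 */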
static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    struct bnxt_vnic_info *vnic;

    if (bp->vnic_info == NULL)
        return;

    vnic = &bp->vnic_info[0];

    vnic->flags |= BNXT_VNIC_INFO_PROMISC;
    bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
}

static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    struct bnxt_vnic_info *vnic;

    if (bp->vnic_info == NULL)
        return;

    vnic = &bp->vnic_info[0];

    vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
    bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
}

static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    struct bnxt_vnic_info *vnic;

    if (bp->vnic_info == NULL)
        return;

    vnic = &bp->vnic_info[0];

    vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
    bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
}

static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    struct bnxt_vnic_info *vnic;

    if (bp->vnic_info == NULL)
        return;

    vnic = &bp->vnic_info[0];

    vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
    bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
}

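/*
 * The RETA ops only accept tables of exactly HW_HASH_INDEX_SIZE
 * entries, because the hardware indirection table has a fixed size.
 */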
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
                   struct rte_eth_rss_reta_entry64 *reta_conf,
                   uint16_t reta_size)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
    struct bnxt_vnic_info *vnic;
    int i;

    if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
        return -EINVAL;

    if (reta_size != HW_HASH_INDEX_SIZE) {
        RTE_LOG(ERR, PMD, "The configured hash table lookup size "
            "(%d) must equal the size supported by the hardware "
            "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
        return -EINVAL;
    }
    /* Update the RSS VNIC(s) */
    for (i = 0; i < MAX_FF_POOLS; i++) {
        STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
            memcpy(vnic->rss_table, reta_conf, reta_size);

            bnxt_hwrm_vnic_rss_cfg(bp, vnic);
        }
    }
    return 0;
}

static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
                  struct rte_eth_rss_reta_entry64 *reta_conf,
                  uint16_t reta_size)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    struct bnxt_vnic_info *vnic = &bp->vnic_info[0];

    /* Retrieve from the default VNIC */
    if (!vnic)
        return -EINVAL;
    if (!vnic->rss_table)
        return -EINVAL;

    if (reta_size != HW_HASH_INDEX_SIZE) {
        RTE_LOG(ERR, PMD, "The configured hash table lookup size "
            "(%d) must equal the size supported by the hardware "
            "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
        return -EINVAL;
    }
    /* EW - need to revisit here copying from u64 to u16 */
    memcpy(reta_conf, vnic->rss_table, reta_size);

    return 0;
}

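/*
 * Translate the rte_eth rss_hf bits into the HWRM hash-type bits and
 * push the result (plus an optional hash key) to every RSS VNIC.
 */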
static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
                   struct rte_eth_rss_conf *rss_conf)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
    struct bnxt_vnic_info *vnic;
    uint16_t hash_type = 0;
    int i;

    /*
     * If RSS enablement were different than dev_configure,
     * then return -EINVAL
     */
    if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
        if (!rss_conf->rss_hf)
            return -EINVAL;
    } else {
        if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
            return -EINVAL;
    }
    if (rss_conf->rss_hf & ETH_RSS_IPV4)
        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
    if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
    if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
    if (rss_conf->rss_hf & ETH_RSS_IPV6)
        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
    if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
    if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

    /* Update the RSS VNIC(s) */
    for (i = 0; i < MAX_FF_POOLS; i++) {
        STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
            vnic->hash_type = hash_type;

            /*
             * Use the supplied key if the key length is
             * acceptable and the rss_key is not NULL
             */
            if (rss_conf->rss_key &&
                rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
                memcpy(vnic->rss_hash_key, rss_conf->rss_key,
                       rss_conf->rss_key_len);

            bnxt_hwrm_vnic_rss_cfg(bp, vnic);
        }
    }
    return 0;
}

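/*
 * The inverse of the update op: read the hash key and hash type back
 * from the default VNIC and decode the HWRM bits into rss_hf flags.
 * Any bit the decode chain does not recognize is reported as an error.
 */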
static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
                     struct rte_eth_rss_conf *rss_conf)
{
    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
    struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
    int len;
    uint32_t hash_types;

    /* RSS configuration is the same for all VNICs */
    if (vnic && vnic->rss_hash_key) {
        if (rss_conf->rss_key) {
            len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
                  rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
            memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
        }

        hash_types = vnic->hash_type;
        rss_conf->rss_hf = 0;
        if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
            rss_conf->rss_hf |= ETH_RSS_IPV4;
            hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
        }
        if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
            rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
            hash_types &=
                ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
        }
        if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
            rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
            hash_types &=
                ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
        }
        if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
            rss_conf->rss_hf |= ETH_RSS_IPV6;
            hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
        }
        if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
            rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
            hash_types &=
                ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
        }
        if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
            rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
            hash_types &=
                ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
        }
        if (hash_types) {
            RTE_LOG(ERR, PMD,
                "Unknown RSS config from firmware (%08x), RSS disabled",
                vnic->hash_type);
            return -ENOTSUP;
        }
    } else {
        rss_conf->rss_hf = 0;
    }
    return 0;
}

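/*
 * Map the firmware pause state onto the rte_eth flow control modes:
 * auto_pause set means pause was autonegotiated, and the TX/RX pause
 * bits combine into NONE, TX_PAUSE, RX_PAUSE, or FULL.
 */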
static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
                 struct rte_eth_fc_conf *fc_conf)
{
    struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
    struct rte_eth_link link_info;
    int rc;

    rc = bnxt_get_hwrm_link_config(bp, &link_info);
    if (rc)
        return rc;

    memset(fc_conf, 0, sizeof(*fc_conf));
    if (bp->link_info.auto_pause)
        fc_conf->autoneg = 1;
    switch (bp->link_info.pause) {
    case 0:
        fc_conf->mode = RTE_FC_NONE;
        break;
    case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
        fc_conf->mode = RTE_FC_TX_PAUSE;
        break;
    case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
        fc_conf->mode = RTE_FC_RX_PAUSE;
        break;
    case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
          HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
        fc_conf->mode = RTE_FC_FULL;
        break;
    }
    return 0;
}

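/*
 * For each requested mode, either advertise pause via autoneg
 * (auto_pause) or force it (force_pause), never both; the final HWRM
 * link config call pushes the choice to the firmware.
 */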
static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
                 struct rte_eth_fc_conf *fc_conf)
{
    struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

    switch (fc_conf->mode) {
    case RTE_FC_NONE:
        bp->link_info.auto_pause = 0;
        bp->link_info.force_pause = 0;
        break;
    case RTE_FC_RX_PAUSE:
        if (fc_conf->autoneg) {
            bp->link_info.auto_pause =
                HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
            bp->link_info.force_pause = 0;
        } else {
            bp->link_info.auto_pause = 0;
            bp->link_info.force_pause =
                HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
        }
        break;
    case RTE_FC_TX_PAUSE:
        if (fc_conf->autoneg) {
            bp->link_info.auto_pause =
                HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
            bp->link_info.force_pause = 0;
        } else {
            bp->link_info.auto_pause = 0;
            bp->link_info.force_pause =
                HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
        }
        break;
    case RTE_FC_FULL:
        if (fc_conf->autoneg) {
            bp->link_info.auto_pause =
                HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
                HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
            bp->link_info.force_pause = 0;
        } else {
            bp->link_info.auto_pause = 0;
            bp->link_info.force_pause =
                HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
                HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
        }
        break;
    }
    return bnxt_set_hwrm_link_config(bp, true);
}

static struct eth_dev_ops bnxt_dev_ops = {
    .dev_infos_get = bnxt_dev_info_get_op,
    .dev_close = bnxt_dev_close_op,
    .dev_configure = bnxt_dev_configure_op,
    .dev_start = bnxt_dev_start_op,
    .dev_stop = bnxt_dev_stop_op,
    .dev_set_link_up = bnxt_dev_set_link_up_op,
    .dev_set_link_down = bnxt_dev_set_link_down_op,
    .stats_get = bnxt_stats_get_op,
    .stats_reset = bnxt_stats_reset_op,
    .rx_queue_setup = bnxt_rx_queue_setup_op,
    .rx_queue_release = bnxt_rx_queue_release_op,
    .tx_queue_setup = bnxt_tx_queue_setup_op,
    .tx_queue_release = bnxt_tx_queue_release_op,
    .reta_update = bnxt_reta_update_op,
    .reta_query = bnxt_reta_query_op,
    .rss_hash_update = bnxt_rss_hash_update_op,
    .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
    .link_update = bnxt_link_update_op,
    .promiscuous_enable = bnxt_promiscuous_enable_op,
    .promiscuous_disable = bnxt_promiscuous_disable_op,
    .allmulticast_enable = bnxt_allmulticast_enable_op,
    .allmulticast_disable = bnxt_allmulticast_disable_op,
    .mac_addr_add = bnxt_mac_addr_add_op,
    .mac_addr_remove = bnxt_mac_addr_remove_op,
    .flow_ctrl_get = bnxt_flow_ctrl_get_op,
    .flow_ctrl_set = bnxt_flow_ctrl_set_op,
};

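/* VFs have their own PCI device IDs, distinct from the PF variants. */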
static bool bnxt_vf_pciid(uint16_t id)
{
    if (id == BROADCOM_DEV_ID_57304_VF ||
        id == BROADCOM_DEV_ID_57406_VF)
        return true;
    return false;
}

static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = eth_dev->data->dev_private;
    int rc;

    /* enable device (incl. PCI PM wakeup), and bus-mastering */
    if (!eth_dev->pci_dev->mem_resource[0].addr) {
        RTE_LOG(ERR, PMD,
            "Cannot find PCI device base address, aborting\n");
        rc = -ENODEV;
        goto init_err_disable;
    }

    bp->eth_dev = eth_dev;
    bp->pdev = eth_dev->pci_dev;

    bp->bar0 = (void *)eth_dev->pci_dev->mem_resource[0].addr;
    if (!bp->bar0) {
        RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
        rc = -ENOMEM;
        goto init_err_release;
    }

    return 0;

init_err_release:
    bp->bar0 = NULL;
init_err_disable:
    return rc;
}

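/*
 * Per-port init: map the BAR, open the HWRM channel, query firmware
 * version and function capabilities, set up the MAC table and ring
 * group bookkeeping, and finally register the driver with firmware.
 * Failures after the ops are wired up unwind through the uninit op.
 */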
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
    static int version_printed;
    struct bnxt *bp;
    int rc;

    if (version_printed++ == 0)
        RTE_LOG(INFO, PMD, "%s", bnxt_version);

    if (eth_dev->pci_dev->addr.function >= 2 &&
        eth_dev->pci_dev->addr.function < 4) {
        RTE_LOG(ERR, PMD, "Function not enabled %x:\n",
            eth_dev->pci_dev->addr.function);
        rc = -ENOMEM;
        goto error;
    }

    rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
    bp = eth_dev->data->dev_private;

    if (bnxt_vf_pciid(eth_dev->pci_dev->id.device_id))
        bp->flags |= BNXT_FLAG_VF;

    rc = bnxt_init_board(eth_dev);
    if (rc) {
        RTE_LOG(ERR, PMD,
            "Board initialization failed rc: %x\n", rc);
        goto error;
    }
    eth_dev->dev_ops = &bnxt_dev_ops;
    eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
    eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

    rc = bnxt_alloc_hwrm_resources(bp);
    if (rc) {
        RTE_LOG(ERR, PMD,
            "hwrm resource allocation failure rc: %x\n", rc);
        goto error_free;
    }
    rc = bnxt_hwrm_ver_get(bp);
    if (rc)
        goto error_free;
    bnxt_hwrm_queue_qportcfg(bp);

    /* Get the MAX capabilities for this function */
    rc = bnxt_hwrm_func_qcaps(bp);
    if (rc) {
        RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
        goto error_free;
    }
    eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
                    ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
    if (eth_dev->data->mac_addrs == NULL) {
        RTE_LOG(ERR, PMD,
            "Failed to alloc %u bytes needed to store MAC addr tbl",
            ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
        rc = -ENOMEM;
        goto error_free;
    }
    /* Copy the permanent MAC from the qcap response address now. */
    if (BNXT_PF(bp))
        memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr));
    else
        memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr));
    memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
    bp->grp_info = rte_zmalloc("bnxt_grp_info",
                sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
    if (!bp->grp_info) {
        RTE_LOG(ERR, PMD,
            "Failed to alloc %zu bytes needed to store group info table\n",
            sizeof(*bp->grp_info) * bp->max_ring_grps);
        rc = -ENOMEM;
        goto error_free;
    }

    rc = bnxt_hwrm_func_driver_register(bp, 0,
                        bp->pf.vf_req_fwd);
    if (rc) {
        RTE_LOG(ERR, PMD,
            "Failed to register driver");
        rc = -EBUSY;
        goto error_free;
    }

    RTE_LOG(INFO, PMD,
        DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %p\n",
        eth_dev->pci_dev->mem_resource[0].phys_addr,
        eth_dev->pci_dev->mem_resource[0].addr);

    return 0;

error_free:
    eth_dev->driver->eth_dev_uninit(eth_dev);
error:
    return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
    struct bnxt *bp = eth_dev->data->dev_private;
    int rc;

    if (eth_dev->data->mac_addrs)
        rte_free(eth_dev->data->mac_addrs);
    if (bp->grp_info)
        rte_free(bp->grp_info);
    rc = bnxt_hwrm_func_driver_unregister(bp, 0);
    bnxt_free_hwrm_resources(bp);
    return rc;
}

static struct eth_driver bnxt_rte_pmd = {
    .pci_drv = {
            .name = "rte_" DRV_MODULE_NAME "_pmd",
            .id_table = bnxt_pci_id_map,
            .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
            },
    .eth_dev_init = bnxt_dev_init,
    .eth_dev_uninit = bnxt_dev_uninit,
    .dev_private_size = sizeof(struct bnxt),
};

static int bnxt_rte_pmd_init(const char *name, const char *params __rte_unused)
{
    RTE_LOG(INFO, PMD, "bnxt_rte_pmd_init() called for %s\n", name);
    rte_eth_driver_register(&bnxt_rte_pmd);
    return 0;
}

static struct rte_driver bnxt_pmd_drv = {
    .name = "eth_bnxt",
    .type = PMD_PDEV,
    .init = bnxt_rte_pmd_init,
};

PMD_REGISTER_DRIVER(bnxt_pmd_drv);