/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";

#define PCI_VENDOR_ID_BROADCOM 0x14E4

#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
#define BROADCOM_DEV_ID_57417_MF 0x16cc
#define BROADCOM_DEV_ID_NS2 0x16cd
#define BROADCOM_DEV_ID_57311 0x16ce
#define BROADCOM_DEV_ID_57312 0x16cf
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
#define BROADCOM_DEV_ID_57412 0x16d6
#define BROADCOM_DEV_ID_57414 0x16d7
#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
#define BROADCOM_DEV_ID_57412_MF 0x16de
#define BROADCOM_DEV_ID_57314 0x16df
#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
#define BROADCOM_DEV_ID_57417_SFP 0x16e2
#define BROADCOM_DEV_ID_57416_SFP 0x16e3
#define BROADCOM_DEV_ID_57317_SFP 0x16e4
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea
#define BROADCOM_DEV_ID_57414_MF 0x16ec
#define BROADCOM_DEV_ID_57416_MF 0x16ee

static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_ETH_RSS_SUPPORT (	\
	ETH_RSS_IPV4 |		\
	ETH_RSS_NONFRAG_IPV4_TCP |	\
	ETH_RSS_NONFRAG_IPV4_UDP |	\
	ETH_RSS_IPV6 |		\
	ETH_RSS_NONFRAG_IPV6_TCP |	\
	ETH_RSS_NONFRAG_IPV6_UDP)

static void bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);

/***********************/

/*
 * High level utility functions
 */

static void bnxt_free_mem(struct bnxt *bp)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_def_cp_ring(bp);
}

static int bnxt_alloc_mem(struct bnxt *bp)
{
	int rc;

	/* Default completion ring */
	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
			      bp->def_cp_ring, "def_cp");
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp);
	return rc;
}

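/*
 * Chip bring-up helper for bnxt_dev_start_op(): allocates HWRM stat
 * contexts, rings and ring groups, then configures each VNIC (context,
 * filters, RSS redirection table, placement/TPA mode), programs the L2
 * RX mask and finally brings the link up. Any failure unwinds all HWRM
 * resources through the err_out path.
 */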
static int bnxt_init_chip(struct bnxt *bp)
{
	unsigned int i, rss_idx, fw_idx;
	struct rte_eth_link new;
	int rc;

	if (bp->eth_dev->data->mtu > ETHER_MTU) {
		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
		bp->flags |= BNXT_FLAG_JUMBO;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic %d ctx alloc failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic %d filter failure rc: %x\n",
				i, rc);
			goto err_out;
		}
		if (vnic->rss_table && vnic->hash_type) {
			/*
			 * Fill the RSS hash & redirection table with
			 * ring group ids for all VNICs
			 */
			for (rss_idx = 0, fw_idx = 0;
			     rss_idx < HW_HASH_INDEX_SIZE;
			     rss_idx++, fw_idx++) {
				if (vnic->fw_grp_ids[fw_idx] ==
				    INVALID_HW_RING_ID)
					fw_idx = 0;
				vnic->rss_table[rss_idx] =
						vnic->fw_grp_ids[fw_idx];
			}
			rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"HWRM vnic %d set RSS failure rc: %x\n",
					i, rc);
				goto err_out;
			}
		}

		bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

		if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
		else
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
	}
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
		goto err_out;
	}

	if (!bp->link_info.link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM link config failure rc: %x\n", rc);
			goto err_out;
		}
	}

	return 0;

err_out:
	bnxt_free_all_hwrm_resources(bp);

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp)
{
	int rc;

	bnxt_init_ring_grps(bp);
	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	rc = bnxt_init_chip(bp);
	if (rc)
		return rc;

	return 0;
}

/*
 * Device configuration and status function
 */

static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				  struct rte_eth_dev_info *dev_info)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* MAC Specifics */
	dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = bp->pdev->max_vfs;
	max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
						RTE_MIN(bp->max_rsscos_ctx,
						bp->max_stat_ctx)));
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bp->max_rsscos_ctx;
	max_vnics = bp->max_vnics;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
				  + VLAN_TAG_SIZE;
	dev_info->rx_offload_capa = 0;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
					DEV_TX_OFFLOAD_TCP_CKSUM |
					DEV_TX_OFFLOAD_UDP_CKSUM |
					DEV_TX_OFFLOAD_TCP_TSO |
					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
					DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					DEV_TX_OFFLOAD_GRE_TNL_TSO |
					DEV_TX_OFFLOAD_IPIP_TNL_TSO |
					DEV_TX_OFFLOAD_GENEVE_TNL_TSO;

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;

	/* Inherit new configurations */
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
		eth_dev->data->mtu =
				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
				ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
	return 0;
}

static inline int
rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
				  struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &eth_dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return 1;

	return 0;
}

static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
			(uint8_t)(eth_dev->data->port_id),
			(uint32_t)link->link_speed,
			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			("full-duplex") : ("half-duplex"));
	else
		RTE_LOG(INFO, PMD, "Port %d Link Down\n",
			(uint8_t)(eth_dev->data->port_id));
}

static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
{
	bnxt_print_link_info(eth_dev);
	return 0;
}

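/*
 * Start the device: program the chip via HWRM, then apply the link and
 * VLAN offload settings inherited from dev_conf. On failure, release the
 * HWRM resources and mbufs allocated so far.
 */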
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	int vlan_mask = 0;
	int rc;

	bp->dev_stopped = 0;

	rc = bnxt_init_nic(bp);
	if (rc)
		goto error;

	bnxt_link_update_op(eth_dev, 0);

	if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	bnxt_vlan_offload_set_op(eth_dev, vlan_mask);

	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 1;
	bnxt_set_hwrm_link_config(bp, true);
	return 0;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	return 0;
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->eth_dev->data->dev_started) {
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
	bnxt_set_hwrm_link_config(bp, false);
	bnxt_hwrm_port_clr_stats(bp);
	bnxt_shutdown_nic(bp);
	bp->dev_stopped = 1;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->dev_stopped == 0)
		bnxt_dev_stop_op(eth_dev);

	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_free_mem(bp);
	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				if (filter->mac_index == index) {
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_filter(bp, filter);
					filter->mac_index = INVALID_MAC_INDEX;
					memset(&filter->l2_addr, 0,
					       ETHER_ADDR_LEN);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				filter = temp_filter;
			}
		}
	}
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
	struct bnxt_filter_info *filter;

	if (BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			RTE_LOG(ERR, PMD,
				"MAC addr already exists for pool %d\n", pool);
			return -EINVAL;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
		return -ENODEV;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
	return bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
}

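/*
 * Poll the link state via HWRM. When wait_to_complete is set, retry up
 * to BNXT_LINK_WAIT_CNT times before giving up; any change is published
 * to eth_dev->data->dev_link atomically and logged.
 */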
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			RTE_LOG(ERR, PMD,
				"Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}
		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

		if (!wait_to_complete)
			break;
	} while (!new.link_status && cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		rte_bnxt_atomic_write_link_status(eth_dev, &new);
		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

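/*
 * Apply a new RSS redirection table. The caller-supplied table size must
 * match the HW_HASH_INDEX_SIZE supported by the hardware; the same table
 * is pushed to every RSS-capable VNIC.
 */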
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	int i;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			memcpy(vnic->rss_table, reta_conf, reta_size);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}

	return 0;
}

static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct rte_intr_handle *intr_handle
		= &bp->pdev->intr_handle;

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* EW - need to revisit here copying from u64 to u16 */
	memcpy(reta_conf, vnic->rss_table, reta_size);

	if (rte_intr_allow_others(intr_handle)) {
		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
			bnxt_dev_lsc_intr_setup(eth_dev);
	}

	return 0;
}

static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	uint16_t hash_type = 0;
	int i;

	/*
	 * If RSS enablement were different than dev_configure,
	 * then return -EINVAL
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			RTE_LOG(ERR, PMD, "Hash type NONE\n");
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}
	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			vnic->hash_type = hash_type;

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss_conf->rss_key &&
			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
				       rss_conf->rss_key_len);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}

static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int len;
	uint32_t hash_types;

	/* RSS configuration is the same for all VNICs */
	if (vnic && vnic->rss_hash_key) {
		if (rss_conf->rss_key) {
			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
		}

		hash_types = vnic->hash_type;
		rss_conf->rss_hf = 0;
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_IPV4;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_IPV6;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
		}
		if (hash_types) {
			RTE_LOG(ERR, PMD,
				"Unknown RSS config from firmware (%08x), RSS disabled",
				vnic->hash_type);
			return -ENOTSUP;
		}
	} else {
		rss_conf->rss_hf = 0;
	}
	return 0;
}

static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_link link_info;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &link_info);
	if (rc)
		return rc;

	memset(fc_conf, 0, sizeof(*fc_conf));
	if (bp->link_info.auto_pause)
		fc_conf->autoneg = 1;
	switch (bp->link_info.pause) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
		fc_conf->mode = RTE_FC_FULL;
		break;
	}
	return 0;
}

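/*
 * Translate the requested flow-control mode into either autoneg pause
 * advertisement bits or forced pause bits, then push the link config.
 * NPAR and VF functions are not allowed to change flow control.
 */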
static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
		return -ENOTSUP;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		bp->link_info.auto_pause = 0;
		bp->link_info.force_pause = 0;
		break;
	case RTE_FC_RX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	case RTE_FC_TX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
		}
		break;
	case RTE_FC_FULL:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	}
	return bnxt_set_hwrm_link_config(bp, true);
}

/* Add UDP tunneling port */
static int
bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
			 struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t tunnel_type = 0;
	int rc = 0;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt) {
			RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
				udp_tunnel->udp_port);
			if (bp->vxlan_port != udp_tunnel->udp_port) {
				RTE_LOG(ERR, PMD, "Only one port allowed\n");
				return -ENOSPC;
			}
			bp->vxlan_port_cnt++;
			return 0;
		}
		tunnel_type =
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
		bp->vxlan_port_cnt++;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (bp->geneve_port_cnt) {
			RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
				udp_tunnel->udp_port);
			if (bp->geneve_port != udp_tunnel->udp_port) {
				RTE_LOG(ERR, PMD, "Only one port allowed\n");
				return -ENOSPC;
			}
			bp->geneve_port_cnt++;
			return 0;
		}
		tunnel_type =
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
		bp->geneve_port_cnt++;
		break;
	default:
		RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
		return -ENOTSUP;
	}
	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
					     tunnel_type);
	return rc;
}

static int
bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
			 struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t tunnel_type = 0;
	uint16_t port = 0;
	int rc = 0;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt) {
			RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
			return -EINVAL;
		}
		if (bp->vxlan_port != udp_tunnel->udp_port) {
			RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
				udp_tunnel->udp_port, bp->vxlan_port);
			return -EINVAL;
		}
		if (--bp->vxlan_port_cnt)
			return 0;

		tunnel_type =
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
		port = bp->vxlan_fw_dst_port_id;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (!bp->geneve_port_cnt) {
			RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
			return -EINVAL;
		}
		if (bp->geneve_port != udp_tunnel->udp_port) {
			RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
				udp_tunnel->udp_port, bp->geneve_port);
			return -EINVAL;
		}
		if (--bp->geneve_port_cnt)
			return 0;

		tunnel_type =
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
		port = bp->geneve_fw_dst_port_id;
		break;
	default:
		RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
		return -ENOTSUP;
	}

	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
	if (!rc) {
		if (tunnel_type ==
		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
			bp->vxlan_port = 0;
		if (tunnel_type ==
		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
			bp->geneve_port = 0;
	}
	return rc;
}

static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
	struct bnxt_vnic_info *vnic;
	unsigned int i;
	int rc = 0;
	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;

	/* Cycle through all VNICs */
	for (i = 0; i < bp->nr_vnics; i++) {
		/*
		 * For each VNIC and each associated filter(s)
		 * if VLAN exists && VLAN matches vlan_id
		 *      remove the MAC+VLAN filter
		 *      add a new MAC only filter
		 * else
		 *      VLAN filter doesn't exist, just skip and continue
		 */
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);

				if (filter->enables & chk &&
				    filter->l2_ovlan == vlan_id) {
					/* Must delete the filter */
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_filter(bp, filter);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);

					/*
					 * Need to examine to see if the MAC
					 * filter already existed or not before
					 * allocating a new one
					 */

					new_filter = bnxt_alloc_filter(bp);
					if (!new_filter) {
						RTE_LOG(ERR, PMD,
							"MAC/VLAN filter alloc failed\n");
						rc = -ENOMEM;
						goto exit;
					}
					STAILQ_INSERT_TAIL(&vnic->filter,
							   new_filter, next);
					/* Inherit MAC from previous filter */
					new_filter->mac_index =
							filter->mac_index;
					memcpy(new_filter->l2_addr,
					       filter->l2_addr, ETHER_ADDR_LEN);
					/* MAC only filter */
					rc = bnxt_hwrm_set_filter(bp,
							vnic->fw_vnic_id,
							new_filter);
					if (rc)
						goto exit;
					RTE_LOG(INFO, PMD,
						"Del Vlan filter for %d\n",
						vlan_id);
				}
				filter = temp_filter;
			}
		}
	}
exit:
	return rc;
}

static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
	struct bnxt_vnic_info *vnic;
	unsigned int i;
	int rc = 0;
	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;

	/* Cycle through all VNICs */
	for (i = 0; i < bp->nr_vnics; i++) {
		/*
		 * For each VNIC and each associated filter(s)
		 * if VLAN exists:
		 *   if VLAN matches vlan_id
		 *      VLAN filter already exists, just skip and continue
		 *   else
		 *      add a new MAC+VLAN filter
		 * else
		 *   Remove the old MAC only filter
		 *   Add a new MAC+VLAN filter
		 */
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);

				if (filter->enables & chk) {
					if (filter->l2_ovlan == vlan_id)
						goto cont;
				} else {
					/* Must delete the MAC filter */
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_filter(bp, filter);
					filter->l2_ovlan = 0;
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				new_filter = bnxt_alloc_filter(bp);
				if (!new_filter) {
					RTE_LOG(ERR, PMD,
						"MAC/VLAN filter alloc failed\n");
					rc = -ENOMEM;
					goto exit;
				}
				STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
						   next);
				/* Inherit MAC from the previous filter */
				new_filter->mac_index = filter->mac_index;
				memcpy(new_filter->l2_addr, filter->l2_addr,
				       ETHER_ADDR_LEN);
				/* MAC + VLAN ID filter */
				new_filter->l2_ovlan = vlan_id;
				new_filter->l2_ovlan_mask = 0xF000;
				new_filter->enables |= en;
				rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id,
							  new_filter);
				if (rc)
					goto exit;
				RTE_LOG(INFO, PMD,
					"Added Vlan filter for %d\n", vlan_id);
cont:
				filter = temp_filter;
			}
		}
	}
exit:
	return rc;
}

static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
				   uint16_t vlan_id, int on)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	/* These operations apply to ALL existing MAC/VLAN filters */
	if (on)
		return bnxt_add_vlan_filter(bp, vlan_id);
	else
		return bnxt_del_vlan_filter(bp, vlan_id);
}

static void
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	unsigned int i;

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (!dev->data->dev_conf.rxmode.hw_vlan_filter) {
			/* Remove any VLAN filters programmed */
			for (i = 0; i < 4095; i++)
				bnxt_del_vlan_filter(bp, i);
		}
		RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
			dev->data->dev_conf.rxmode.hw_vlan_filter);
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		for (i = 0; i < bp->nr_vnics; i++) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
			if (dev->data->dev_conf.rxmode.hw_vlan_strip)
				vnic->vlan_strip = true;
			else
				vnic->vlan_strip = false;
			bnxt_hwrm_vnic_cfg(bp, vnic);
		}
		RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
			dev->data->dev_conf.rxmode.hw_vlan_strip);
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
}

static void
bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	/* Default Filter is tied to VNIC 0 */
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct bnxt_filter_info *filter;
	int rc;

	if (BNXT_VF(bp))
		return;

	memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
	memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		/* Default Filter is at Index 0 */
		if (filter->mac_index != 0)
			continue;
		rc = bnxt_hwrm_clear_filter(bp, filter);
		if (rc)
			break;
		memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
		rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
		if (rc)
			break;
		filter->mac_index = 0;
		RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
	}
}

static int
bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
			  struct ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	char *mc_addr_list = (char *)mc_addr_set;
	struct bnxt_vnic_info *vnic;
	uint32_t off = 0, i = 0;

	vnic = &bp->vnic_info[0];

	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
		goto allmulti;
	}

	/* TODO Check for Duplicate mcast addresses */
	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	for (i = 0; i < nb_mc_addr; i++) {
		memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
		off += ETHER_ADDR_LEN;
	}

	vnic->mc_addr_cnt = i;

allmulti:
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static int
bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
	int ret;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
			fw_major, fw_minor, fw_updt);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct bnxt_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = 0;
	qinfo->conf.rx_deferred_start = 0;
}

static void
bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct bnxt_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
	qinfo->conf.tx_thresh.wthresh = txq->wthresh;

	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
	qinfo->conf.tx_rs_thresh = 0;
	qinfo->conf.txq_flags = txq->txq_flags;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

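/*
 * Validate and apply a new MTU: toggle the jumbo-frame flag, recompute
 * max_rx_pkt_len (MTU plus L2 header, CRC and two VLAN tags) and update
 * the MRU of every VNIC through HWRM.
 */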
static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev_info dev_info;
	uint32_t max_dev_mtu;
	uint32_t rc = 0;
	uint32_t i;

	bnxt_dev_info_get_op(eth_dev, &dev_info);
	max_dev_mtu = dev_info.max_rx_pktlen -
		      ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;

	if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
		RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
			ETHER_MIN_MTU, max_dev_mtu);
		return -EINVAL;
	}

	if (new_mtu > ETHER_MTU) {
		bp->flags |= BNXT_FLAG_JUMBO;
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	} else {
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
		new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;

	eth_dev->data->mtu = new_mtu;
	RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
					ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc)
			break;

		rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
		if (rc)
			return rc;
	}

	return rc;
}

static int
bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint16_t vlan = bp->vlan;
	int rc;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD,
			"PVID cannot be modified for this function\n");
		return -ENOTSUP;
	}
	bp->vlan = on ? pvid : 0;

	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
	if (rc)
		bp->vlan = vlan;
	return rc;
}

static int
bnxt_dev_led_on_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, true);
}

static int
bnxt_dev_led_off_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, false);
}

/*
 * Initialization
 */

static const struct eth_dev_ops bnxt_dev_ops = {
	.dev_infos_get = bnxt_dev_info_get_op,
	.dev_close = bnxt_dev_close_op,
	.dev_configure = bnxt_dev_configure_op,
	.dev_start = bnxt_dev_start_op,
	.dev_stop = bnxt_dev_stop_op,
	.dev_set_link_up = bnxt_dev_set_link_up_op,
	.dev_set_link_down = bnxt_dev_set_link_down_op,
	.stats_get = bnxt_stats_get_op,
	.stats_reset = bnxt_stats_reset_op,
	.rx_queue_setup = bnxt_rx_queue_setup_op,
	.rx_queue_release = bnxt_rx_queue_release_op,
	.tx_queue_setup = bnxt_tx_queue_setup_op,
	.tx_queue_release = bnxt_tx_queue_release_op,
	.reta_update = bnxt_reta_update_op,
	.reta_query = bnxt_reta_query_op,
	.rss_hash_update = bnxt_rss_hash_update_op,
	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
	.link_update = bnxt_link_update_op,
	.promiscuous_enable = bnxt_promiscuous_enable_op,
	.promiscuous_disable = bnxt_promiscuous_disable_op,
	.allmulticast_enable = bnxt_allmulticast_enable_op,
	.allmulticast_disable = bnxt_allmulticast_disable_op,
	.mac_addr_add = bnxt_mac_addr_add_op,
	.mac_addr_remove = bnxt_mac_addr_remove_op,
	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
	.udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op,
	.udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op,
	.vlan_filter_set = bnxt_vlan_filter_set_op,
	.vlan_offload_set = bnxt_vlan_offload_set_op,
	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
	.mtu_set = bnxt_mtu_set_op,
	.mac_addr_set = bnxt_set_default_mac_addr_op,
	.xstats_get = bnxt_dev_xstats_get_op,
	.xstats_get_names = bnxt_dev_xstats_get_names_op,
	.xstats_reset = bnxt_dev_xstats_reset_op,
	.fw_version_get = bnxt_fw_version_get,
	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
	.rxq_info_get = bnxt_rxq_info_get_op,
	.txq_info_get = bnxt_txq_info_get_op,
	.dev_led_on = bnxt_dev_led_on_op,
	.dev_led_off = bnxt_dev_led_off_op,
	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
};

static bool bnxt_vf_pciid(uint16_t id)
{
	if (id == BROADCOM_DEV_ID_57304_VF ||
	    id == BROADCOM_DEV_ID_57406_VF ||
	    id == BROADCOM_DEV_ID_5731X_VF ||
	    id == BROADCOM_DEV_ID_5741X_VF ||
	    id == BROADCOM_DEV_ID_57414_VF ||
	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
		return true;
	return false;
}

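/*
 * Record the BAR 0 mapping for register access. The PCI resources are
 * already mapped by the EAL at probe time, so this only validates and
 * stores the address.
 */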
static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int rc;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	if (!pci_dev->mem_resource[0].addr) {
		RTE_LOG(ERR, PMD,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	bp->eth_dev = eth_dev;
	bp->pdev = pci_dev;

	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
	if (!bp->bar0) {
		RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	return 0;

init_err_release:
	if (bp->bar0)
		bp->bar0 = NULL;

init_err_disable:

	return rc;
}

static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);

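/*
 * Clear the forwarding bit for the given HWRM command so that requests
 * of that type coming from a VF are handled by firmware directly rather
 * than being forwarded to the PF driver.
 */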
#define ALLOW_FUNC(x)	\
	{ \
		typeof(x) arg = (x); \
		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
	}

static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	static int version_printed;
	uint32_t total_alloc_len;
	phys_addr_t mz_phys_addr;
	struct bnxt *bp;
	int rc;

	if (version_printed++ == 0)
		RTE_LOG(INFO, PMD, "%s\n", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	bp = eth_dev->data->dev_private;

	rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
	bp->dev_stopped = 1;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto skip_init;

	if (bnxt_vf_pciid(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Board initialization failed rc: %x\n", rc);
		goto error;
	}
skip_init:
	eth_dev->dev_ops = &bnxt_dev_ops;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
			 pci_dev->addr.bus, pci_dev->addr.devid,
			 pci_dev->addr.function, "rx_port_stats");
		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
		mz = rte_memzone_lookup(mz_name);
		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
				sizeof(struct rx_port_stats) + 512);
		if (!mz) {
			mz = rte_memzone_reserve(mz_name, total_alloc_len,
						 SOCKET_ID_ANY,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL)
				return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		mz_phys_addr = mz->phys_addr;
		if ((unsigned long)mz->addr == mz_phys_addr) {
			RTE_LOG(WARNING, PMD,
				"Memzone physical address same as virtual.\n");
			RTE_LOG(WARNING, PMD,
				"Using rte_mem_virt2phy()\n");
			mz_phys_addr = rte_mem_virt2phy(mz->addr);
			if (mz_phys_addr == 0) {
				RTE_LOG(ERR, PMD,
					"unable to map address to physical memory\n");
				return -ENOMEM;
			}
		}

		bp->rx_mem_zone = (const void *)mz;
		bp->hw_rx_port_stats = mz->addr;
		bp->hw_rx_port_stats_map = mz_phys_addr;

		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
			 pci_dev->addr.bus, pci_dev->addr.devid,
			 pci_dev->addr.function, "tx_port_stats");
		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
		mz = rte_memzone_lookup(mz_name);
		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
				sizeof(struct tx_port_stats) + 512);
		if (!mz) {
			mz = rte_memzone_reserve(mz_name, total_alloc_len,
						 SOCKET_ID_ANY,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL)
				return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		mz_phys_addr = mz->phys_addr;
		if ((unsigned long)mz->addr == mz_phys_addr) {
			RTE_LOG(WARNING, PMD,
				"Memzone physical address same as virtual.\n");
			RTE_LOG(WARNING, PMD,
				"Using rte_mem_virt2phy()\n");
			mz_phys_addr = rte_mem_virt2phy(mz->addr);
			if (mz_phys_addr == 0) {
				RTE_LOG(ERR, PMD,
					"unable to map address to physical memory\n");
				return -ENOMEM;
			}
		}

		bp->tx_mem_zone = (const void *)mz;
		bp->hw_tx_port_stats = mz->addr;
		bp->hw_tx_port_stats_map = mz_phys_addr;

		bp->flags |= BNXT_FLAG_PORT_STATS;
	}

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"hwrm resource allocation failure rc: %x\n", rc);
		goto error_free;
	}
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto error_free;
	bnxt_hwrm_queue_qportcfg(bp);

	bnxt_hwrm_func_qcfg(bp);

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
		goto error_free;
	}
	if (bp->max_tx_rings == 0) {
		RTE_LOG(ERR, PMD, "No TX rings available!\n");
		rc = -EBUSY;
		goto error_free;
	}
	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %u bytes needed to store MAC addr tbl",
			ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
		rc = -ENOMEM;
		goto error_free;
	}
	/* Copy the permanent MAC from the qcap response address now. */
	memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
	bp->grp_info = rte_zmalloc("bnxt_grp_info",
				sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
	if (!bp->grp_info) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %zu bytes needed to store group info table\n",
			sizeof(*bp->grp_info) * bp->max_ring_grps);
		rc = -ENOMEM;
		goto error_free;
	}

	/* Forward all requests if firmware is new enough */
	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
	    (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
	} else {
		RTE_LOG(WARNING, PMD,
			"Firmware too old for VF mailbox functionality\n");
		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
	}

	/*
	 * The following are used for driver cleanup.  If we disallow these,
	 * VF drivers can't clean up cleanly.
	 */
	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
	ALLOW_FUNC(HWRM_VNIC_FREE);
	ALLOW_FUNC(HWRM_RING_FREE);
	ALLOW_FUNC(HWRM_RING_GRP_FREE);
	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
	rc = bnxt_hwrm_func_driver_register(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to register driver");
		rc = -EBUSY;
		goto error_free;
	}

	RTE_LOG(INFO, PMD,
		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
		pci_dev->mem_resource[0].phys_addr,
		pci_dev->mem_resource[0].addr);

	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
		rc = -EIO;
		goto error_free;
	}

	if (BNXT_PF(bp)) {
		//if (bp->pf.active_vfs) {
			// TODO: Deallocate VF resources?
		//}
		if (bp->pdev->max_vfs) {
			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
			if (rc) {
				RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
				goto error_free;
			}
		} else {
			rc = bnxt_hwrm_allocate_pf_only(bp);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"Failed to allocate PF resources\n");
				goto error_free;
			}
		}
	}

	bnxt_hwrm_port_led_qcaps(bp);

	rc = bnxt_setup_int(bp);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_mem(bp);
	if (rc)
		goto error_free_int;

	rc = bnxt_request_int(bp);
	if (rc)
		goto error_free_int;

	rc = bnxt_alloc_def_cp_ring(bp);
	if (rc)
		goto error_free_int;

	bnxt_enable_int(bp);

	return 0;

error_free_int:
	bnxt_disable_int(bp);
	bnxt_free_def_cp_ring(bp);
	bnxt_hwrm_func_buf_unrgtr(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
error_free:
	bnxt_dev_uninit(eth_dev);
error:
	return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	bnxt_disable_int(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
	bnxt_free_hwrm_resources(bp);
	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
	if (bp->dev_stopped == 0)
		bnxt_dev_close_op(eth_dev);
	if (bp->pf.vf_info)
		rte_free(bp->pf.vf_info);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return rc;
}

static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
		bnxt_dev_init);
}

static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		RTE_PCI_DRV_INTR_LSC,
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");