/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
#define PCI_VENDOR_ID_BROADCOM 0x14E4

#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
#define BROADCOM_DEV_ID_57417_MF 0x16cc
#define BROADCOM_DEV_ID_NS2 0x16cd
#define BROADCOM_DEV_ID_57311 0x16ce
#define BROADCOM_DEV_ID_57312 0x16cf
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
#define BROADCOM_DEV_ID_57412 0x16d6
#define BROADCOM_DEV_ID_57414 0x16d7
#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
#define BROADCOM_DEV_ID_57412_MF 0x16de
#define BROADCOM_DEV_ID_57314 0x16df
#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
#define BROADCOM_DEV_ID_57417_SFP 0x16e2
#define BROADCOM_DEV_ID_57416_SFP 0x16e3
#define BROADCOM_DEV_ID_57317_SFP 0x16e4
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea
#define BROADCOM_DEV_ID_57414_MF 0x16ec
#define BROADCOM_DEV_ID_57416_MF 0x16ee
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ .vendor_id = 0, /* sentinel */ },
};
#define BNXT_ETH_RSS_SUPPORT (	\
	ETH_RSS_IPV4 |		\
	ETH_RSS_NONFRAG_IPV4_TCP |	\
	ETH_RSS_NONFRAG_IPV4_UDP |	\
	ETH_RSS_IPV6 |		\
	ETH_RSS_NONFRAG_IPV6_TCP |	\
	ETH_RSS_NONFRAG_IPV6_UDP)
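/*
 * Note: BNXT_ETH_RSS_SUPPORT is the full set of rte_eth RSS hash types this
 * PMD can honor.  bnxt_rss_hash_update_op() below rejects any rss_hf inside
 * this mask when RSS was not enabled via mq_mode, and otherwise maps each
 * flag to the matching HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_* bit.
 */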
static void bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);

/***********************/

/*
 * High level utility functions
 */
static void bnxt_free_mem(struct bnxt *bp)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_def_cp_ring(bp);
}
static int bnxt_alloc_mem(struct bnxt *bp)
{
	int rc;

	/* Default completion ring */
	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
			      bp->def_cp_ring, "def_cp");
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp);
	return rc;
}
static int bnxt_init_chip(struct bnxt *bp)
{
	unsigned int i, rss_idx, fw_idx;
	struct rte_eth_link new;
	int rc;

	if (bp->eth_dev->data->mtu > ETHER_MTU) {
		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
		bp->flags |= BNXT_FLAG_JUMBO;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic %d ctx alloc failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic %d filter failure rc: %x\n",
				i, rc);
			goto err_out;
		}
		if (vnic->rss_table && vnic->hash_type) {
			/*
			 * Fill the RSS hash & redirection table with
			 * ring group ids for all VNICs
			 */
			for (rss_idx = 0, fw_idx = 0;
			     rss_idx < HW_HASH_INDEX_SIZE;
			     rss_idx++, fw_idx++) {
				if (vnic->fw_grp_ids[fw_idx] ==
				    INVALID_HW_RING_ID)
					fw_idx = 0;
				vnic->rss_table[rss_idx] =
						vnic->fw_grp_ids[fw_idx];
			}
			rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"HWRM vnic %d set RSS failure rc: %x\n",
					i, rc);
				goto err_out;
			}
		}

		bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

		if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
		else
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
	}
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
		goto err_out;
	}

	if (!bp->link_info.link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM link config failure rc: %x\n", rc);
			goto err_out;
		}
	}

	return 0;

err_out:
	bnxt_free_all_hwrm_resources(bp);

	return rc;
}
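/*
 * Note on the RSS table fill in bnxt_init_chip() above: the redirection
 * table is populated by striping the VNIC's ring group ids across all
 * HW_HASH_INDEX_SIZE slots, wrapping fw_idx back to 0 whenever an
 * INVALID_HW_RING_ID entry is hit.  With three ring groups g0..g2, for
 * example, the table becomes g0, g1, g2, g0, g1, g2, ...
 */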
static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}
static int bnxt_init_nic(struct bnxt *bp)
{
	int rc;

	bnxt_init_ring_grps(bp);
	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	rc = bnxt_init_chip(bp);
	if (rc)
		return rc;

	return 0;
}
/*
 * Device configuration and status function
 */

static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				  struct rte_eth_dev_info *dev_info)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* MAC Specifics */
	dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = bp->pdev->max_vfs;
	max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
						RTE_MIN(bp->max_rsscos_ctx,
						bp->max_stat_ctx)));
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bp->max_rsscos_ctx;
	dev_info->hash_key_size = 40;
	max_vnics = bp->max_vnics;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
				  + VLAN_TAG_SIZE;
	dev_info->rx_offload_capa = 0;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
					DEV_TX_OFFLOAD_TCP_CKSUM |
					DEV_TX_OFFLOAD_UDP_CKSUM |
					DEV_TX_OFFLOAD_TCP_TSO |
					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
					DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					DEV_TX_OFFLOAD_GRE_TNL_TSO |
					DEV_TX_OFFLOAD_IPIP_TNL_TSO |
					DEV_TX_OFFLOAD_GENEVE_TNL_TSO;

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}
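/*
 * Note on the VMDq sizing loops in bnxt_dev_info_get_op() above: vpool walks
 * the pool counts 64, 32, 16, 8 and vrxq the queue counts 128, 64, 32, 16, 8;
 * the first (vpool, vrxq) pair strictly below max_vnics and max_rx_queues is
 * reported (with vpool clamped to vrxq), otherwise both fall back to 0 and
 * VMDq is advertised as unsupported.
 */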
/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;

	/* Inherit new configurations */
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
		eth_dev->data->mtu =
				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
				ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
	return 0;
}
static inline int
rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
				  struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &eth_dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
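/*
 * The cmpset above relies on struct rte_eth_link packing into a single
 * 64-bit word, so link status, speed and duplex are published to readers
 * atomically; this mirrors the atomic link-status pattern used by other
 * PMDs of this era.
 */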
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
			(uint8_t)(eth_dev->data->port_id),
			(uint32_t)link->link_speed,
			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			("full-duplex") : ("half-duplex\n"));
	else
		RTE_LOG(INFO, PMD, "Port %d Link Down\n",
			(uint8_t)(eth_dev->data->port_id));
}
static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
{
	bnxt_print_link_info(eth_dev);
	return 0;
}
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	int vlan_mask = 0;
	int rc;

	bp->dev_stopped = 0;

	rc = bnxt_init_nic(bp);
	if (rc)
		goto error;

	bnxt_link_update_op(eth_dev, 0);

	if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	bnxt_vlan_offload_set_op(eth_dev, vlan_mask);

	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	return rc;
}
static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 1;
	bnxt_set_hwrm_link_config(bp, true);
	return 0;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	return 0;
}
/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->eth_dev->data->dev_started) {
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
	bnxt_set_hwrm_link_config(bp, false);
	bnxt_hwrm_port_clr_stats(bp);
	bnxt_shutdown_nic(bp);
	bp->dev_stopped = 1;
}
static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->dev_stopped == 0)
		bnxt_dev_stop_op(eth_dev);

	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_free_mem(bp);
	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
}
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				if (filter->mac_index == index) {
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					filter->mac_index = INVALID_MAC_INDEX;
					memset(&filter->l2_addr, 0,
					       ETHER_ADDR_LEN);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				filter = temp_filter;
			}
		}
	}
}
static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
	struct bnxt_filter_info *filter;

	if (BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			RTE_LOG(ERR, PMD,
				"MAC addr already existed for pool %d\n", pool);
			return -EINVAL;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
		return -ENODEV;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
	return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
}
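/*
 * MAC filters hang off the per-pool VNIC as a STAILQ: adding an address
 * means allocating a bnxt_filter_info, queueing it on vnic->filter and
 * pushing it to firmware through bnxt_hwrm_set_l2_filter(); removal walks
 * the same list keyed by mac_index and returns the filter to
 * bp->free_filter_list.
 */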
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			RTE_LOG(ERR, PMD,
				"Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}
		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

		if (!wait_to_complete)
			break;
	} while (!new.link_status && cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		rte_bnxt_atomic_write_link_status(eth_dev, &new);
		bnxt_print_link_info(eth_dev);
	}

	return rc;
}
static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	int i;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			memcpy(vnic->rss_table, reta_conf, reta_size);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}
static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct rte_intr_handle *intr_handle
		= &bp->pdev->intr_handle;

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* EW - need to revisit here copying from u64 to u16 */
	memcpy(reta_conf, vnic->rss_table, reta_size);

	if (rte_intr_allow_others(intr_handle)) {
		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
			bnxt_dev_lsc_intr_setup(eth_dev);
	}

	return 0;
}
static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	uint16_t hash_type = 0;
	int i;

	/*
	 * If RSS enablement were different than dev_configure,
	 * then return -EINVAL
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			RTE_LOG(ERR, PMD, "Hash type NONE\n");
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}

	bp->flags |= BNXT_FLAG_UPDATE_HASH;
	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));

	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			vnic->hash_type = hash_type;

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss_conf->rss_key &&
			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
				       rss_conf->rss_key_len);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}
static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int len;
	uint32_t hash_types;

	/* RSS configuration is the same for all VNICs */
	if (vnic && vnic->rss_hash_key) {
		if (rss_conf->rss_key) {
			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
		}

		hash_types = vnic->hash_type;
		rss_conf->rss_hf = 0;
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_IPV4;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_IPV6;
			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		}
		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
			hash_types &=
				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
		}
		if (hash_types) {
			RTE_LOG(ERR, PMD,
				"Unknown RSS config from firmware (%08x), RSS disabled",
				vnic->hash_type);
			return -ENOTSUP;
		}
	} else {
		rss_conf->rss_hf = 0;
	}
	return 0;
}
static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
				 struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_link link_info;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &link_info);
	if (rc)
		return rc;

	memset(fc_conf, 0, sizeof(*fc_conf));
	if (bp->link_info.auto_pause)
		fc_conf->autoneg = 1;
	switch (bp->link_info.pause) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
		fc_conf->mode = RTE_FC_FULL;
		break;
	}
	return 0;
}
static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
				 struct rte_eth_fc_conf *fc_conf)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
		return -ENOTSUP;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		bp->link_info.auto_pause = 0;
		bp->link_info.force_pause = 0;
		break;
	case RTE_FC_RX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	case RTE_FC_TX_PAUSE:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
		}
		break;
	case RTE_FC_FULL:
		if (fc_conf->autoneg) {
			bp->link_info.auto_pause =
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
			bp->link_info.force_pause = 0;
		} else {
			bp->link_info.auto_pause = 0;
			bp->link_info.force_pause =
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
		}
		break;
	}
	return bnxt_set_hwrm_link_config(bp, true);
}
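/*
 * Pause configuration summary for bnxt_flow_ctrl_set_op() above: with
 * autoneg, the requested direction(s) are advertised through
 * link_info.auto_pause; otherwise they are forced through
 * link_info.force_pause.  Either way the result is pushed to firmware by
 * bnxt_set_hwrm_link_config(bp, true).
 */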
/* Add UDP tunneling port */
static int
bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t tunnel_type = 0;
	int rc = 0;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt) {
			RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
				udp_tunnel->udp_port);
			if (bp->vxlan_port != udp_tunnel->udp_port) {
				RTE_LOG(ERR, PMD, "Only one port allowed\n");
				return -ENOSPC;
			}
			bp->vxlan_port_cnt++;
			return 0;
		}
		tunnel_type =
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
		bp->vxlan_port_cnt++;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (bp->geneve_port_cnt) {
			RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
				udp_tunnel->udp_port);
			if (bp->geneve_port != udp_tunnel->udp_port) {
				RTE_LOG(ERR, PMD, "Only one port allowed\n");
				return -ENOSPC;
			}
			bp->geneve_port_cnt++;
			return 0;
		}
		tunnel_type =
			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
		bp->geneve_port_cnt++;
		break;
	default:
		RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
		return -ENOTSUP;
	}
	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
					     tunnel_type);
	return rc;
}
static int
bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t tunnel_type = 0;
	uint16_t port = 0;
	int rc = 0;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt) {
			RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
			return -EINVAL;
		}
		if (bp->vxlan_port != udp_tunnel->udp_port) {
			RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
				udp_tunnel->udp_port, bp->vxlan_port);
			return -EINVAL;
		}
		if (--bp->vxlan_port_cnt)
			return 0;

		tunnel_type =
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
		port = bp->vxlan_fw_dst_port_id;
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		if (!bp->geneve_port_cnt) {
			RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
			return -EINVAL;
		}
		if (bp->geneve_port != udp_tunnel->udp_port) {
			RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
				udp_tunnel->udp_port, bp->geneve_port);
			return -EINVAL;
		}
		if (--bp->geneve_port_cnt)
			return 0;

		tunnel_type =
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
		port = bp->geneve_fw_dst_port_id;
		break;
	default:
		RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
		return -ENOTSUP;
	}

	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
	if (!rc) {
		if (tunnel_type ==
		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
			bp->vxlan_port = 0;
		if (tunnel_type ==
		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
			bp->geneve_port = 0;
	}
	return rc;
}
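/*
 * The VXLAN/Geneve destination port is reference counted: repeated adds of
 * the same port only bump vxlan_port_cnt/geneve_port_cnt, and the HWRM free
 * is issued (and the cached port cleared) only when the count drops to zero.
 */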
static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
	struct bnxt_vnic_info *vnic;
	unsigned int i;
	int rc = 0;
	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;

	/* Cycle through all VNICs */
	for (i = 0; i < bp->nr_vnics; i++) {
		/*
		 * For each VNIC and each associated filter(s)
		 * if VLAN exists && VLAN matches vlan_id
		 *      remove the MAC+VLAN filter
		 *      add a new MAC only filter
		 * else
		 *      VLAN filter doesn't exist, just skip and continue
		 */
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);

				if (filter->enables & chk &&
				    filter->l2_ovlan == vlan_id) {
					/* Must delete the filter */
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);

					/*
					 * Need to examine to see if the MAC
					 * filter already existed or not before
					 * allocating a new one
					 */

					new_filter = bnxt_alloc_filter(bp);
					if (!new_filter) {
						RTE_LOG(ERR, PMD,
							"MAC/VLAN filter alloc failed\n");
						rc = -ENOMEM;
						goto exit;
					}
					STAILQ_INSERT_TAIL(&vnic->filter,
							   new_filter, next);
					/* Inherit MAC from previous filter */
					new_filter->mac_index =
							filter->mac_index;
					memcpy(new_filter->l2_addr,
					       filter->l2_addr, ETHER_ADDR_LEN);
					/* MAC only filter */
					rc = bnxt_hwrm_set_l2_filter(bp,
							vnic->fw_vnic_id,
							new_filter);
					if (rc)
						goto exit;
					RTE_LOG(INFO, PMD,
						"Del Vlan filter for %d\n",
						vlan_id);
				}
				filter = temp_filter;
			}
		}
	}
exit:
	return rc;
}
static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
{
	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
	struct bnxt_vnic_info *vnic;
	unsigned int i;
	int rc = 0;
	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;

	/* Cycle through all VNICs */
	for (i = 0; i < bp->nr_vnics; i++) {
		/*
		 * For each VNIC and each associated filter(s)
		 * if VLAN exists:
		 *   if VLAN matches vlan_id
		 *      VLAN filter already exists, just skip and continue
		 *   else
		 *      add a new MAC+VLAN filter
		 * else
		 *      Remove the old MAC only filter
		 *      Add a new MAC+VLAN filter
		 */
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);

				if (filter->enables & chk) {
					if (filter->l2_ovlan == vlan_id)
						goto cont;
				} else {
					/* Must delete the MAC filter */
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_l2_filter(bp, filter);
					filter->l2_ovlan = 0;
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				new_filter = bnxt_alloc_filter(bp);
				if (!new_filter) {
					RTE_LOG(ERR, PMD,
						"MAC/VLAN filter alloc failed\n");
					rc = -ENOMEM;
					goto exit;
				}
				STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
						   next);
				/* Inherit MAC from the previous filter */
				new_filter->mac_index = filter->mac_index;
				memcpy(new_filter->l2_addr, filter->l2_addr,
				       ETHER_ADDR_LEN);
				/* MAC + VLAN ID filter */
				new_filter->l2_ovlan = vlan_id;
				new_filter->l2_ovlan_mask = 0xF000;
				new_filter->enables |= en;
				rc = bnxt_hwrm_set_l2_filter(bp,
							     vnic->fw_vnic_id,
							     new_filter);
				if (rc)
					goto exit;
				RTE_LOG(INFO, PMD,
					"Added Vlan filter for %d\n", vlan_id);
cont:
				filter = temp_filter;
			}
		}
	}
exit:
	return rc;
}
static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
				   uint16_t vlan_id, int on)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	/* These operations apply to ALL existing MAC/VLAN filters */
	if (on)
		return bnxt_add_vlan_filter(bp, vlan_id);
	else
		return bnxt_del_vlan_filter(bp, vlan_id);
}
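/*
 * Note: enabling a VLAN promotes existing MAC-only filters to MAC+VLAN
 * filters and disabling demotes them back, so bnxt_add_vlan_filter() and
 * bnxt_del_vlan_filter() above rewrite the per-VNIC filter lists rather
 * than toggling a single flag.
 */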
static void
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	unsigned int i;

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (!dev->data->dev_conf.rxmode.hw_vlan_filter) {
			/* Remove any VLAN filters programmed */
			for (i = 0; i < 4095; i++)
				bnxt_del_vlan_filter(bp, i);
		}
		RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
			dev->data->dev_conf.rxmode.hw_vlan_filter);
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		for (i = 0; i < bp->nr_vnics; i++) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
			if (dev->data->dev_conf.rxmode.hw_vlan_strip)
				vnic->vlan_strip = true;
			else
				vnic->vlan_strip = false;
			bnxt_hwrm_vnic_cfg(bp, vnic);
		}
		RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
			dev->data->dev_conf.rxmode.hw_vlan_strip);
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
}
static void
bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	/* Default Filter is tied to VNIC 0 */
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct bnxt_filter_info *filter;
	int rc;

	if (BNXT_VF(bp))
		return;

	memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
	memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		/* Default Filter is at Index 0 */
		if (filter->mac_index != 0)
			continue;
		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
		if (rc)
			break;
		memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
		if (rc)
			break;
		filter->mac_index = 0;
		RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
	}
}
static int
bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
			     struct ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	char *mc_addr_list = (char *)mc_addr_set;
	struct bnxt_vnic_info *vnic;
	uint32_t off = 0, i = 0;

	vnic = &bp->vnic_info[0];

	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
		goto allmulti;
	}

	/* TODO Check for Duplicate mcast addresses */
	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	for (i = 0; i < nb_mc_addr; i++) {
		memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
		off += ETHER_ADDR_LEN;
	}

	vnic->mc_addr_cnt = i;

allmulti:
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
static int
bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
	int ret;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
			fw_major, fw_minor, fw_updt);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}
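/*
 * fw_ver packs major/minor/update into the top three bytes; for example,
 * 0x14066400 decodes as 20.6.100.  Per the rte_eth fw_version_get contract,
 * the required buffer size (snprintf() result plus one for the NUL) is
 * returned when the caller's buffer is too small.
 */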
static void
bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct bnxt_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = 0;
	qinfo->conf.rx_deferred_start = 0;
}
static void
bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct bnxt_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
	qinfo->conf.tx_thresh.wthresh = txq->wthresh;

	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
	qinfo->conf.tx_rs_thresh = 0;
	qinfo->conf.txq_flags = txq->txq_flags;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev_info dev_info;
	uint32_t max_dev_mtu;
	uint32_t rc = 0;
	uint32_t i;

	bnxt_dev_info_get_op(eth_dev, &dev_info);
	max_dev_mtu = dev_info.max_rx_pktlen -
		      ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;

	if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
		RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
			ETHER_MIN_MTU, max_dev_mtu);
		return -EINVAL;
	}

	if (new_mtu > ETHER_MTU) {
		bp->flags |= BNXT_FLAG_JUMBO;
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	} else {
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
		new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;

	eth_dev->data->mtu = new_mtu;
	RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc)
			break;

		rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
		if (rc)
			return rc;
	}

	return rc;
}
static int
bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint16_t vlan = bp->vlan;
	int rc;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD,
			"PVID cannot be modified for this function\n");
		return -ENOTSUP;
	}
	bp->vlan = on ? pvid : 0;

	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
	if (rc)
		bp->vlan = vlan;
	return rc;
}
static int
bnxt_dev_led_on_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, true);
}

static int
bnxt_dev_led_off_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, false);
}
static uint32_t
bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint32_t desc = 0, raw_cons = 0, cons;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_queue *rxq;
	struct rx_pkt_cmpl *rxcmp;
	uint16_t cmp_type;
	uint8_t cmp = 1;
	bool valid;

	rxq = dev->data->rx_queues[rx_queue_id];
	cpr = rxq->cp_ring;
	valid = cpr->valid;

	while (raw_cons < rxq->nb_rx_desc) {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!CMPL_VALID(rxcmp, valid))
			goto nothing_to_do;
		valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
		cmp_type = CMP_TYPE(rxcmp);
		if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
			cmp = (rte_le_to_cpu_32(
					((struct rx_tpa_end_cmpl *)
					 (rxcmp))->agg_bufs_v1) &
			       RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
				RX_TPA_END_CMPL_AGG_BUFS_SFT;
			desc++;
		} else if (cmp_type == 0x11) {
			desc++;
			cmp = (rxcmp->agg_bufs_v1 &
			       RX_PKT_CMPL_AGG_BUFS_MASK) >>
				RX_PKT_CMPL_AGG_BUFS_SFT;
		} else {
			cmp = 1;
		}
nothing_to_do:
		raw_cons += cmp ? cmp : 2;
	}

	return desc;
}
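/*
 * Note on the scan above: completion entries carry a phase ("valid") bit
 * that flips on every pass around the ring, so CMPL_VALID()/FLIP_VALID()
 * distinguish entries the hardware has written from stale ones.  TPA-end
 * and regular RX completions consume a variable number of entries, hence
 * the cmp-based advance of raw_cons.
 */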
static int
bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_pkt_cmpl *rxcmp;
	uint32_t cons, cp_cons;

	if (!rxq)
		return -EINVAL;

	cpr = rxq->cp_ring;
	rxr = rxq->rx_ring;

	if (offset >= rxq->nb_rx_desc)
		return -EINVAL;

	cons = RING_CMP(cpr->cp_ring_struct, offset);
	cp_cons = cpr->cp_raw_cons;
	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

	if (cons > cp_cons) {
		if (CMPL_VALID(rxcmp, cpr->valid))
			return RTE_ETH_RX_DESC_DONE;
	} else {
		if (CMPL_VALID(rxcmp, !cpr->valid))
			return RTE_ETH_RX_DESC_DONE;
	}
	rx_buf = &rxr->rx_buf_ring[cons];
	if (rx_buf->mbuf == NULL)
		return RTE_ETH_RX_DESC_UNAVAIL;

	return RTE_ETH_RX_DESC_AVAIL;
}
static int
bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
{
	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_pkt_cmpl *txcmp;
	uint32_t cons, cp_cons;

	if (!txq)
		return -EINVAL;

	cpr = txq->cp_ring;
	txr = txq->tx_ring;

	if (offset >= txq->nb_tx_desc)
		return -EINVAL;

	cons = RING_CMP(cpr->cp_ring_struct, offset);
	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
	cp_cons = cpr->cp_raw_cons;

	if (cons > cp_cons) {
		if (CMPL_VALID(txcmp, cpr->valid))
			return RTE_ETH_TX_DESC_UNAVAIL;
	} else {
		if (CMPL_VALID(txcmp, !cpr->valid))
			return RTE_ETH_TX_DESC_UNAVAIL;
	}
	tx_buf = &txr->tx_buf_ring[cons];
	if (tx_buf->mbuf == NULL)
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}
static struct bnxt_filter_info *
bnxt_match_and_validate_ether_filter(struct bnxt *bp,
				struct rte_eth_ethertype_filter *efilter,
				struct bnxt_vnic_info *vnic0,
				struct bnxt_vnic_info *vnic,
				int *ret)
{
	struct bnxt_filter_info *mfilter = NULL;
	int match = 0;
	*ret = 0;

	if (efilter->ether_type != ETHER_TYPE_IPv4 &&
		efilter->ether_type != ETHER_TYPE_IPv6) {
		RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in"
			" ethertype filter.", efilter->ether_type);
		*ret = -EINVAL;
		goto exit;
	}
	if (efilter->queue >= bp->rx_nr_rings) {
		RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
		*ret = -EINVAL;
		goto exit;
	}

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
	if (vnic == NULL) {
		RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
		*ret = -EINVAL;
		goto exit;
	}

	if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
			if ((!memcmp(efilter->mac_addr.addr_bytes,
				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
			     mfilter->flags ==
			     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
			     mfilter->ethertype == efilter->ether_type)) {
				match = 1;
				break;
			}
		}
	} else {
		STAILQ_FOREACH(mfilter, &vnic->filter, next)
			if ((!memcmp(efilter->mac_addr.addr_bytes,
				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
			     mfilter->ethertype == efilter->ether_type &&
			     mfilter->flags ==
			     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
				match = 1;
				break;
			}
	}

	if (match)
		*ret = -EEXIST;

exit:
	return mfilter;
}
static int
bnxt_ethertype_filter(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_ethertype_filter *efilter =
			(struct rte_eth_ethertype_filter *)arg;
	struct bnxt_filter_info *bfilter, *filter1;
	struct bnxt_vnic_info *vnic, *vnic0;
	int ret;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		bnxt_match_and_validate_ether_filter(bp, efilter,
						     vnic0, vnic, &ret);
		if (ret < 0)
			return ret;

		bfilter = bnxt_get_unused_filter(bp);
		if (bfilter == NULL) {
			RTE_LOG(ERR, PMD,
				"Not enough resources for a new filter.\n");
			return -ENOMEM;
		}
		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
		memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
		       ETHER_ADDR_LEN);
		memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
		       ETHER_ADDR_LEN);
		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
		bfilter->ethertype = efilter->ether_type;
		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;

		filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
		if (filter1 == NULL) {
			ret = -1;
			goto cleanup;
		}
		bfilter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;

		bfilter->dst_id = vnic->fw_vnic_id;

		if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
			bfilter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		}

		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
		if (ret)
			goto cleanup;
		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
		break;
	case RTE_ETH_FILTER_DELETE:
		filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
							       vnic0, vnic, &ret);
		if (ret == -EEXIST) {
			ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);

			STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
				      next);
			bnxt_free_filter(bp, filter1);
		} else if (ret == 0) {
			RTE_LOG(ERR, PMD, "No matching filter found\n");
		}
		break;
	default:
		RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		goto error;
	}
	return ret;
cleanup:
	bnxt_free_filter(bp, bfilter);
error:
	return ret;
}
static int
bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op, void *arg)
{
	int ret = 0;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
	case RTE_ETH_FILTER_FDIR:
	case RTE_ETH_FILTER_TUNNEL:
		RTE_LOG(ERR, PMD,
			"filter type: %d: To be implemented\n", filter_type);
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = bnxt_ethertype_filter(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &bnxt_flow_ops;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Filter type (%d) not supported", filter_type);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static const struct eth_dev_ops bnxt_dev_ops = {
	.dev_infos_get = bnxt_dev_info_get_op,
	.dev_close = bnxt_dev_close_op,
	.dev_configure = bnxt_dev_configure_op,
	.dev_start = bnxt_dev_start_op,
	.dev_stop = bnxt_dev_stop_op,
	.dev_set_link_up = bnxt_dev_set_link_up_op,
	.dev_set_link_down = bnxt_dev_set_link_down_op,
	.stats_get = bnxt_stats_get_op,
	.stats_reset = bnxt_stats_reset_op,
	.rx_queue_setup = bnxt_rx_queue_setup_op,
	.rx_queue_release = bnxt_rx_queue_release_op,
	.tx_queue_setup = bnxt_tx_queue_setup_op,
	.tx_queue_release = bnxt_tx_queue_release_op,
	.reta_update = bnxt_reta_update_op,
	.reta_query = bnxt_reta_query_op,
	.rss_hash_update = bnxt_rss_hash_update_op,
	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
	.link_update = bnxt_link_update_op,
	.promiscuous_enable = bnxt_promiscuous_enable_op,
	.promiscuous_disable = bnxt_promiscuous_disable_op,
	.allmulticast_enable = bnxt_allmulticast_enable_op,
	.allmulticast_disable = bnxt_allmulticast_disable_op,
	.mac_addr_add = bnxt_mac_addr_add_op,
	.mac_addr_remove = bnxt_mac_addr_remove_op,
	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
	.udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op,
	.udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op,
	.vlan_filter_set = bnxt_vlan_filter_set_op,
	.vlan_offload_set = bnxt_vlan_offload_set_op,
	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
	.mtu_set = bnxt_mtu_set_op,
	.mac_addr_set = bnxt_set_default_mac_addr_op,
	.xstats_get = bnxt_dev_xstats_get_op,
	.xstats_get_names = bnxt_dev_xstats_get_names_op,
	.xstats_reset = bnxt_dev_xstats_reset_op,
	.fw_version_get = bnxt_fw_version_get,
	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
	.rxq_info_get = bnxt_rxq_info_get_op,
	.txq_info_get = bnxt_txq_info_get_op,
	.dev_led_on = bnxt_dev_led_on_op,
	.dev_led_off = bnxt_dev_led_off_op,
	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
	.rx_queue_count = bnxt_rx_queue_count_op,
	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
	.filter_ctrl = bnxt_filter_ctrl_op,
};
static bool bnxt_vf_pciid(uint16_t id)
{
	if (id == BROADCOM_DEV_ID_57304_VF ||
	    id == BROADCOM_DEV_ID_57406_VF ||
	    id == BROADCOM_DEV_ID_5731X_VF ||
	    id == BROADCOM_DEV_ID_5741X_VF ||
	    id == BROADCOM_DEV_ID_57414_VF ||
	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
		return true;
	return false;
}
static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int rc;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	if (!pci_dev->mem_resource[0].addr) {
		RTE_LOG(ERR, PMD,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	bp->eth_dev = eth_dev;
	bp->pdev = pci_dev;

	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
	if (!bp->bar0) {
		RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	return 0;

init_err_release:
	if (bp->bar0)
		bp->bar0 = NULL;

init_err_disable:

	return rc;
}
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);

#define ALLOW_FUNC(x)	\
	{ \
		typeof(x) arg = (x); \
		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
	}
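/*
 * ALLOW_FUNC clears one bit in the vf_req_fwd bitmap: (x >> 5) selects the
 * 32-bit word and (x & 0x1f) the bit within it.  For example, assuming the
 * HWRM command id HWRM_FUNC_DRV_UNRGTR is 0x1a, ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR)
 * clears bit 26 of word 0, so that request from a VF is handled by firmware
 * directly instead of being forwarded to the PF driver.
 */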
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	static int version_printed;
	uint32_t total_alloc_len;
	phys_addr_t mz_phys_addr;
	struct bnxt *bp;
	int rc;

	if (version_printed++ == 0)
		RTE_LOG(INFO, PMD, "%s\n", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	bp = eth_dev->data->dev_private;

	rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
	bp->dev_stopped = 1;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto skip_init;

	if (bnxt_vf_pciid(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Board initialization failed rc: %x\n", rc);
		goto error;
	}
skip_init:
	eth_dev->dev_ops = &bnxt_dev_ops;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
			 pci_dev->addr.bus, pci_dev->addr.devid,
			 pci_dev->addr.function, "rx_port_stats");
		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
		mz = rte_memzone_lookup(mz_name);
		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
				sizeof(struct rx_port_stats) + 512);
		if (!mz) {
			mz = rte_memzone_reserve(mz_name, total_alloc_len,
						 SOCKET_ID_ANY,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL)
				return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		mz_phys_addr = mz->phys_addr;
		if ((unsigned long)mz->addr == mz_phys_addr) {
			RTE_LOG(WARNING, PMD,
				"Memzone physical address same as virtual.\n");
			RTE_LOG(WARNING, PMD,
				"Using rte_mem_virt2phy()\n");
			mz_phys_addr = rte_mem_virt2phy(mz->addr);
			if (mz_phys_addr == 0) {
				RTE_LOG(ERR, PMD,
					"unable to map address to physical memory\n");
				return -ENOMEM;
			}
		}

		bp->rx_mem_zone = (const void *)mz;
		bp->hw_rx_port_stats = mz->addr;
		bp->hw_rx_port_stats_map = mz_phys_addr;

		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
			 pci_dev->addr.bus, pci_dev->addr.devid,
			 pci_dev->addr.function, "tx_port_stats");
		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
		mz = rte_memzone_lookup(mz_name);
		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
				sizeof(struct tx_port_stats) + 512);
		if (!mz) {
			mz = rte_memzone_reserve(mz_name, total_alloc_len,
						 SOCKET_ID_ANY,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL)
				return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		mz_phys_addr = mz->phys_addr;
		if ((unsigned long)mz->addr == mz_phys_addr) {
			RTE_LOG(WARNING, PMD,
				"Memzone physical address same as virtual.\n");
			RTE_LOG(WARNING, PMD,
				"Using rte_mem_virt2phy()\n");
			mz_phys_addr = rte_mem_virt2phy(mz->addr);
			if (mz_phys_addr == 0) {
				RTE_LOG(ERR, PMD,
					"unable to map address to physical memory\n");
				return -ENOMEM;
			}
		}

		bp->tx_mem_zone = (const void *)mz;
		bp->hw_tx_port_stats = mz->addr;
		bp->hw_tx_port_stats_map = mz_phys_addr;

		bp->flags |= BNXT_FLAG_PORT_STATS;
	}

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"hwrm resource allocation failure rc: %x\n", rc);
		goto error_free;
	}
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto error_free;
	bnxt_hwrm_queue_qportcfg(bp);

	bnxt_hwrm_func_qcfg(bp);

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
		goto error_free;
	}
	if (bp->max_tx_rings == 0) {
		RTE_LOG(ERR, PMD, "No TX rings available!\n");
		rc = -EBUSY;
		goto error_free;
	}
	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %u bytes needed to store MAC addr tbl",
			ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
		rc = -ENOMEM;
		goto error_free;
	}
	/* Copy the permanent MAC from the qcap response address now. */
	memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
	bp->grp_info = rte_zmalloc("bnxt_grp_info",
				sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
	if (!bp->grp_info) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %zu bytes needed to store group info table\n",
			sizeof(*bp->grp_info) * bp->max_ring_grps);
		rc = -ENOMEM;
		goto error_free;
	}

	/* Forward all requests if firmware is new enough */
	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
	    (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
	} else {
		RTE_LOG(WARNING, PMD,
			"Firmware too old for VF mailbox functionality\n");
		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
	}

	/*
	 * The following are used for driver cleanup.  If we disallow these,
	 * VF drivers can't clean up cleanly.
	 */
	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
	ALLOW_FUNC(HWRM_VNIC_FREE);
	ALLOW_FUNC(HWRM_RING_FREE);
	ALLOW_FUNC(HWRM_RING_GRP_FREE);
	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
	rc = bnxt_hwrm_func_driver_register(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to register driver");
		rc = -EBUSY;
		goto error_free;
	}

	RTE_LOG(INFO, PMD,
		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
		pci_dev->mem_resource[0].phys_addr,
		pci_dev->mem_resource[0].addr);

	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
		rc = -1;
		goto error_free;
	}

	if (BNXT_PF(bp)) {
		//if (bp->pf.active_vfs) {
		//	TODO: Deallocate VF resources?
		//}
		if (bp->pdev->max_vfs) {
			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
			if (rc) {
				RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
				goto error_free;
			}
		} else {
			rc = bnxt_hwrm_allocate_pf_only(bp);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"Failed to allocate PF resources\n");
				goto error_free;
			}
		}
	}

	bnxt_hwrm_port_led_qcaps(bp);

	rc = bnxt_setup_int(bp);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_mem(bp);
	if (rc)
		goto error_free_int;

	rc = bnxt_request_int(bp);
	if (rc)
		goto error_free_int;

	rc = bnxt_alloc_def_cp_ring(bp);
	if (rc)
		goto error_free_int;

	bnxt_enable_int(bp);

	return 0;

error_free_int:
	bnxt_disable_int(bp);
	bnxt_free_def_cp_ring(bp);
	bnxt_hwrm_func_buf_unrgtr(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
error_free:
	bnxt_dev_uninit(eth_dev);
error:
	return rc;
}
static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	bnxt_disable_int(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
	bnxt_free_hwrm_resources(bp);
	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
	if (bp->dev_stopped == 0)
		bnxt_dev_close_op(eth_dev);
	if (bp->pf.vf_info)
		rte_free(bp->pf.vf_info);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return rc;
}
static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
		bnxt_dev_init);
}

static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		RTE_PCI_DRV_INTR_LSC,
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};
static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");