diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 778323b441..9f8a63afab 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1,43 +1,17 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) Broadcom Limited.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Broadcom Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
  */

 #include <inttypes.h>
 #include <stdbool.h>

 #include <rte_dev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
 #include <rte_malloc.h>
 #include <rte_cycles.h>
+#include <rte_alarm.h>

 #include "bnxt.h"
 #include "bnxt_cpr.h"
@@ -52,32 +26,69 @@
 #include "bnxt_txr.h"
 #include "bnxt_vnic.h"
 #include "hsi_struct_def_dpdk.h"
+#include "bnxt_nvm_defs.h"
+#include "bnxt_util.h"

 #define DRV_MODULE_NAME "bnxt"
 static const char bnxt_version[] =
-    "Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
+    "Broadcom NetXtreme driver " DRV_MODULE_NAME;
+int bnxt_logtype_driver;

 #define PCI_VENDOR_ID_BROADCOM 0x14E4

+#define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609
+#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
+#define BROADCOM_DEV_ID_57414_VF 0x16c1
 #define BROADCOM_DEV_ID_57301 0x16c8
 #define BROADCOM_DEV_ID_57302 0x16c9
 #define BROADCOM_DEV_ID_57304_PF 0x16ca
 #define BROADCOM_DEV_ID_57304_VF 0x16cb
+#define BROADCOM_DEV_ID_57417_MF 0x16cc
 #define BROADCOM_DEV_ID_NS2 0x16cd
+#define BROADCOM_DEV_ID_57311 0x16ce
+#define BROADCOM_DEV_ID_57312 0x16cf
 #define BROADCOM_DEV_ID_57402 0x16d0
 #define BROADCOM_DEV_ID_57404 0x16d1
 #define BROADCOM_DEV_ID_57406_PF 0x16d2
 #define BROADCOM_DEV_ID_57406_VF 0x16d3
 #define BROADCOM_DEV_ID_57402_MF 0x16d4
 #define BROADCOM_DEV_ID_57407_RJ45 0x16d5
+#define BROADCOM_DEV_ID_57412 0x16d6
+#define BROADCOM_DEV_ID_57414 0x16d7
+#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
+#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
 #define BROADCOM_DEV_ID_5741X_VF 0x16dc
+#define BROADCOM_DEV_ID_57412_MF 0x16de
+#define BROADCOM_DEV_ID_57314 0x16df
+#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
 #define BROADCOM_DEV_ID_5731X_VF 0x16e1
+#define BROADCOM_DEV_ID_57417_SFP 0x16e2
+#define BROADCOM_DEV_ID_57416_SFP 0x16e3
+#define BROADCOM_DEV_ID_57317_SFP 0x16e4
 #define BROADCOM_DEV_ID_57404_MF 0x16e7
 #define BROADCOM_DEV_ID_57406_MF 0x16e8
 #define BROADCOM_DEV_ID_57407_SFP 0x16e9
 #define BROADCOM_DEV_ID_57407_MF 0x16ea
-
-static struct rte_pci_id bnxt_pci_id_map[] = {
+#define BROADCOM_DEV_ID_57414_MF 0x16ec
+#define BROADCOM_DEV_ID_57416_MF 0x16ee
+#define BROADCOM_DEV_ID_57508 0x1750
+#define BROADCOM_DEV_ID_57504 0x1751
+#define BROADCOM_DEV_ID_57502 0x1752
+#define BROADCOM_DEV_ID_57500_VF1 0x1806
+#define BROADCOM_DEV_ID_57500_VF2 0x1807
+#define BROADCOM_DEV_ID_58802 0xd802
+#define BROADCOM_DEV_ID_58804 0xd804
+#define BROADCOM_DEV_ID_58808 0x16f0
+#define BROADCOM_DEV_ID_58802_VF 0xd800
+
+static const struct rte_pci_id bnxt_pci_id_map[] = {
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
+             BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
+             BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
     { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
     { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
     { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
@@ -95,6 +106,30 @@ static struct rte_pci_id bnxt_pci_id_map[] = {
     { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
     { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
     { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
+             BROADCOM_DEV_ID_57412) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
+    { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
     { .vendor_id = 0, /* sentinel */ },
 };

@@ -106,35 +141,99 @@ static struct rte_pci_id bnxt_pci_id_map[] = {
                   ETH_RSS_NONFRAG_IPV6_TCP |    \
                   ETH_RSS_NONFRAG_IPV6_UDP)

+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
+                     DEV_TX_OFFLOAD_IPV4_CKSUM | \
+                     DEV_TX_OFFLOAD_TCP_CKSUM | \
+                     DEV_TX_OFFLOAD_UDP_CKSUM | \
+                     DEV_TX_OFFLOAD_TCP_TSO | \
+                     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+                     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
+                     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
+                     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
+                     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
+                     DEV_TX_OFFLOAD_QINQ_INSERT | \
+                     DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
+                     DEV_RX_OFFLOAD_VLAN_STRIP | \
+                     DEV_RX_OFFLOAD_IPV4_CKSUM | \
+                     DEV_RX_OFFLOAD_UDP_CKSUM | \
+                     DEV_RX_OFFLOAD_TCP_CKSUM | \
+                     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+                     DEV_RX_OFFLOAD_JUMBO_FRAME | \
+                     DEV_RX_OFFLOAD_KEEP_CRC | \
+                     DEV_RX_OFFLOAD_VLAN_EXTEND | \
+                     DEV_RX_OFFLOAD_TCP_LRO)
+
+static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
+static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
+static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
+static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
+static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
+static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
+static void bnxt_cancel_fw_health_check(struct bnxt *bp);
+
+int is_bnxt_in_error(struct bnxt *bp)
+{
+    if (bp->flags & BNXT_FLAG_FATAL_ERROR)
+        return -EIO;
+    if (bp->flags & BNXT_FLAG_FW_RESET)
+        return -EBUSY;
+
+    return 0;
+}
+
 /***********************/

 /*
  * High level utility functions
  */

-static void bnxt_free_mem(struct bnxt *bp)
+uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
+{
+    if (!BNXT_CHIP_THOR(bp))
+        return 1;
+
+    return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
+                  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
+           BNXT_RSS_ENTRIES_PER_CTX_THOR;
+}
+
+static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
+{
+    if (!BNXT_CHIP_THOR(bp))
+        return HW_HASH_INDEX_SIZE;
+
+    return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
+}
+
+static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
 {
     bnxt_free_filter_mem(bp);
     bnxt_free_vnic_attributes(bp);
     bnxt_free_vnic_mem(bp);

-    bnxt_free_stats(bp);
-    bnxt_free_tx_rings(bp);
-    bnxt_free_rx_rings(bp);
-    bnxt_free_def_cp_ring(bp);
+    /* tx/rx rings are configured as part of *_queue_setup callbacks.
+     * If the number of rings change across fw update,
+     * we don't have much choice except to warn the user.
+     */
+    if (!reconfig) {
+        bnxt_free_stats(bp);
+        bnxt_free_tx_rings(bp);
+        bnxt_free_rx_rings(bp);
+    }
+    bnxt_free_async_cp_ring(bp);
 }

-static int bnxt_alloc_mem(struct bnxt *bp)
+static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
 {
     int rc;

-    /* Default completion ring */
-    rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
+    rc = bnxt_alloc_ring_grps(bp);
     if (rc)
         goto alloc_mem_err;

-    rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
-                  bp->def_cp_ring, "def_cp");
+    rc = bnxt_alloc_async_ring_struct(bp);
     if (rc)
         goto alloc_mem_err;

@@ -150,122 +249,238 @@ static int bnxt_alloc_mem(struct bnxt *bp)
     if (rc)
         goto alloc_mem_err;

+    rc = bnxt_alloc_async_cp_ring(bp);
+    if (rc)
+        goto alloc_mem_err;
+
     return 0;

 alloc_mem_err:
-    bnxt_free_mem(bp);
+    bnxt_free_mem(bp, reconfig);
     return rc;
 }

 static int bnxt_init_chip(struct bnxt *bp)
 {
-    unsigned int i, rss_idx, fw_idx;
+    struct bnxt_rx_queue *rxq;
     struct rte_eth_link new;
+    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
+    struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+    uint64_t rx_offloads = dev_conf->rxmode.offloads;
+    uint32_t intr_vector = 0;
+    uint32_t queue_id, base = BNXT_MISC_VEC_ID;
+    uint32_t vec = BNXT_MISC_VEC_ID;
+    unsigned int i, j;
     int rc;

+    if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
+        bp->eth_dev->data->dev_conf.rxmode.offloads |=
+            DEV_RX_OFFLOAD_JUMBO_FRAME;
+        bp->flags |= BNXT_FLAG_JUMBO;
+    } else {
+        bp->eth_dev->data->dev_conf.rxmode.offloads &=
+            ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+        bp->flags &= ~BNXT_FLAG_JUMBO;
+    }
+
+    /* THOR does not support ring groups.
+     * But we will use the array to save RSS context IDs.
+     */
+    if (BNXT_CHIP_THOR(bp))
+        bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
+
     rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
     if (rc) {
-        RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
+        PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
         goto err_out;
     }

     rc = bnxt_alloc_hwrm_rings(bp);
     if (rc) {
-        RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
+        PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
         goto err_out;
     }

     rc = bnxt_alloc_all_hwrm_ring_grps(bp);
     if (rc) {
-        RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
+        PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
         goto err_out;
     }

     rc = bnxt_mq_rx_configure(bp);
     if (rc) {
-        RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
+        PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
         goto err_out;
     }

     /* VNIC configuration */
     for (i = 0; i < bp->nr_vnics; i++) {
+        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
         struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

+        rc = bnxt_vnic_grp_alloc(bp, vnic);
+        if (rc)
+            goto err_out;
+
+        PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
+                i, vnic, vnic->fw_grp_ids);
+
         rc = bnxt_hwrm_vnic_alloc(bp, vnic);
         if (rc) {
-            RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n",
-                rc);
+            PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
+                    i, rc);
             goto err_out;
         }

-        rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
-        if (rc) {
-            RTE_LOG(ERR, PMD,
-                "HWRM vnic ctx alloc failure rc: %x\n", rc);
-            goto err_out;
+        /* Alloc RSS context only if RSS mode is enabled */
+        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+            int j, nr_ctxs = bnxt_rss_ctxts(bp);
+
+            rc = 0;
+            for (j = 0; j < nr_ctxs; j++) {
+                rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
+                if (rc)
+                    break;
+            }
+            if (rc) {
+                PMD_DRV_LOG(ERR,
+                    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
+                    i, j, rc);
+                goto err_out;
+            }
+            vnic->num_lb_ctxts = nr_ctxs;
         }

+        /*
+         * Firmware sets pf pair in default vnic cfg. If the VLAN strip
+         * setting is not available at this time, it will not be
+         * configured correctly in the CFA.
+         */
+        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+            vnic->vlan_strip = true;
+        else
+            vnic->vlan_strip = false;
+
         rc = bnxt_hwrm_vnic_cfg(bp, vnic);
         if (rc) {
-            RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc);
+            PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
+                    i, rc);
             goto err_out;
         }

         rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
         if (rc) {
-            RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n",
-                rc);
+            PMD_DRV_LOG(ERR,
+                    "HWRM vnic %d filter failure rc: %x\n",
+                    i, rc);
             goto err_out;
         }
-        if (vnic->rss_table && vnic->hash_type) {
-            /*
-             * Fill the RSS hash & redirection table with
-             * ring group ids for all VNICs
-             */
-            for (rss_idx = 0, fw_idx = 0;
-                 rss_idx < HW_HASH_INDEX_SIZE;
-                 rss_idx++, fw_idx++) {
-                if (vnic->fw_grp_ids[fw_idx] ==
-                    INVALID_HW_RING_ID)
-                    fw_idx = 0;
-                vnic->rss_table[rss_idx] =
-                    vnic->fw_grp_ids[fw_idx];
-            }
-            rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
-            if (rc) {
-                RTE_LOG(ERR, PMD,
-                    "HWRM vnic set RSS failure rc: %x\n",
-                    rc);
-                goto err_out;
-            }
+
+        for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
+            rxq = bp->eth_dev->data->rx_queues[j];
+
+            PMD_DRV_LOG(DEBUG,
+                    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
+                    j, rxq->vnic, rxq->vnic->fw_grp_ids);
+
+            if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
+                rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+        }
+
+        rc = bnxt_vnic_rss_configure(bp, vnic);
+        if (rc) {
+            PMD_DRV_LOG(ERR,
+                    "HWRM vnic set RSS failure rc: %x\n", rc);
+            goto err_out;
         }
+
+        bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+
+        if (bp->eth_dev->data->dev_conf.rxmode.offloads &
+            DEV_RX_OFFLOAD_TCP_LRO)
+            bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
+        else
+            bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
     }
-    rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
+
+    rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
     if (rc) {
-        RTE_LOG(ERR, PMD,
+        PMD_DRV_LOG(ERR,
             "HWRM cfa l2 rx mask failure rc: %x\n", rc);
         goto err_out;
     }

+    /* check and configure queue intr-vector mapping */
+    if ((rte_intr_cap_multiple(intr_handle) ||
+         !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
+        bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
+        intr_vector = bp->eth_dev->data->nb_rx_queues;
+        PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
+        if (intr_vector > bp->rx_cp_nr_rings) {
+            PMD_DRV_LOG(ERR, "At most %d intr queues supported",
+                    bp->rx_cp_nr_rings);
+            return -ENOTSUP;
+        }
+        rc = rte_intr_efd_enable(intr_handle, intr_vector);
+        if (rc)
+            return rc;
+    }
+
+    if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+        intr_handle->intr_vec =
+            rte_zmalloc("intr_vec",
+                    bp->eth_dev->data->nb_rx_queues *
+                    sizeof(int), 0);
+        if (intr_handle->intr_vec == NULL) {
+            PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
+                    " intr_vec", bp->eth_dev->data->nb_rx_queues);
+            rc = -ENOMEM;
+            goto err_disable;
+        }
+        PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
+                "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
+                intr_handle->intr_vec, intr_handle->nb_efd,
+                intr_handle->max_intr);
+        for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
+             queue_id++) {
+            intr_handle->intr_vec[queue_id] =
+                vec + BNXT_RX_VEC_START;
+            if (vec < base + intr_handle->nb_efd - 1)
+                vec++;
+        }
+    }
+
+    /* enable uio/vfio intr/eventfd mapping */
+    rc = rte_intr_enable(intr_handle);
+    if (rc)
+        goto err_free;
+
     rc = bnxt_get_hwrm_link_config(bp, &new);
     if (rc) {
-        RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
-        goto err_out;
+        PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
+        goto err_free;
     }

     if (!bp->link_info.link_up) {
         rc = bnxt_set_hwrm_link_config(bp, true);
         if (rc) {
-            RTE_LOG(ERR, PMD,
+            PMD_DRV_LOG(ERR,
                 "HWRM link config failure rc: %x\n", rc);
-            goto err_out;
+            goto err_free;
         }
     }
+    bnxt_print_link_info(bp->eth_dev);

     return 0;

+err_free:
+    rte_free(intr_handle->intr_vec);
+err_disable:
+    rte_intr_efd_disable(intr_handle);
 err_out:
-    bnxt_free_all_hwrm_resources(bp);
+    /* Some of the error status returned by FW may not be from errno.h */
+    if (rc > 0)
+        rc = -EIO;

     return rc;
 }

@@ -282,14 +497,15 @@ static int bnxt_init_nic(struct bnxt *bp)
 {
     int rc;

-    bnxt_init_ring_grps(bp);
+    if (BNXT_HAS_RING_GRPS(bp)) {
+        rc = bnxt_init_ring_grps(bp);
+        if (rc)
+            return rc;
+    }
+
     bnxt_init_vnics(bp);
     bnxt_init_filters(bp);

-    rc = bnxt_init_chip(bp);
-    if (rc)
-        return rc;
-
     return 0;
 }

@@ -297,41 +513,48 @@ static int bnxt_init_nic(struct bnxt *bp)
  * Device configuration and status function
  */

-static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
-                 struct rte_eth_dev_info *dev_info)
+static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
+                struct rte_eth_dev_info *dev_info)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
+    struct bnxt *bp = eth_dev->data->dev_private;
     uint16_t max_vnics, i, j, vpool, vrxq;
+    unsigned int max_rx_rings;
+    int rc;

-    dev_info->pci_dev = eth_dev->pci_dev;
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;

     /* MAC Specifics */
-    dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
+    dev_info->max_mac_addrs = bp->max_l2_ctx;
     dev_info->max_hash_mac_addrs = 0;

     /* PF/VF specifics */
-    if (BNXT_PF(bp)) {
-        dev_info->max_rx_queues = bp->pf.max_rx_rings;
-        dev_info->max_tx_queues = bp->pf.max_tx_rings;
-        dev_info->max_vfs = bp->pf.active_vfs;
-        dev_info->reta_size = bp->pf.max_rsscos_ctx;
-        max_vnics = bp->pf.max_vnics;
-    } else {
-        dev_info->max_rx_queues = bp->vf.max_rx_rings;
-        dev_info->max_tx_queues = bp->vf.max_tx_rings;
-        dev_info->reta_size = bp->vf.max_rsscos_ctx;
-        max_vnics = bp->vf.max_vnics;
-    }
+    if (BNXT_PF(bp))
+        dev_info->max_vfs = pdev->max_vfs;
+
+    max_rx_rings = RTE_MIN(bp->max_rx_rings, bp->max_stat_ctx);
+    /* For the sake of symmetry, max_rx_queues = max_tx_queues */
+    dev_info->max_rx_queues = max_rx_rings;
+    dev_info->max_tx_queues = max_rx_rings;
+    dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
+    dev_info->hash_key_size = 40;
+    max_vnics = bp->max_vnics;
+
+    /* MTU specifics */
+    dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+    dev_info->max_mtu = BNXT_MAX_MTU;

     /* Fast path specifics */
     dev_info->min_rx_bufsize = 1;
-    dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
-                  + VLAN_TAG_SIZE;
-    dev_info->rx_offload_capa = 0;
-    dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
-                    DEV_TX_OFFLOAD_TCP_CKSUM |
-                    DEV_TX_OFFLOAD_UDP_CKSUM |
-                    DEV_TX_OFFLOAD_TCP_TSO;
+    dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
+
+    dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
+    if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
+        dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+    dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
+    dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

     /* *INDENT-OFF* */
     dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -341,7 +564,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
             .wthresh = 0,
         },
         .rx_free_thresh = 32,
-        .rx_drop_en = 0,
+        /* If no descriptors available, pkts are dropped by default */
+        .rx_drop_en = 1,
     };

     dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -352,11 +576,15 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
         },
         .tx_free_thresh = 32,
         .tx_rs_thresh = 32,
-        .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-                 ETH_TXQ_FLAGS_NOOFFLOADS,
     };
     eth_dev->data->dev_conf.intr_conf.lsc = 1;

+    eth_dev->data->dev_conf.intr_conf.rxq = 1;
+    dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
+    dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
+    dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
+    dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
+
     /* *INDENT-ON* */

     /*
@@ -389,41 +617,88 @@ found:
     dev_info->vmdq_pool_base = 0;
     dev_info->vmdq_queue_base = 0;
+
+    return 0;
 }

 /* Configure the device based on the configuration provided */
 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
+    uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+    int rc;

     bp->rx_queues = (void *)eth_dev->data->rx_queues;
     bp->tx_queues = (void *)eth_dev->data->tx_queues;
+    bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
+    bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
+
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;
+
+    if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
+        rc = bnxt_hwrm_check_vf_rings(bp);
+        if (rc) {
+            PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
+            return -ENOSPC;
+        }
+
+        rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
+        if (rc) {
+            PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
+            return -ENOSPC;
+        }
+    } else {
+        /* legacy driver needs to get updated values */
+        rc = bnxt_hwrm_func_qcaps(bp);
+        if (rc) {
+            PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
+            return rc;
+        }
+    }

     /* Inherit new configurations */
-    bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
-    bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
+    if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
+        eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
+        eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues +
+        BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
+        eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
+        bp->max_stat_ctx)
+        goto resource_error;
+
+    if (BNXT_HAS_RING_GRPS(bp) &&
+        (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
+        goto resource_error;
+
+    if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+        bp->max_vnics < eth_dev->data->nb_rx_queues)
+        goto resource_error;
+
     bp->rx_cp_nr_rings = bp->rx_nr_rings;
     bp->tx_cp_nr_rings = bp->tx_nr_rings;

-    if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+    if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
         eth_dev->data->mtu =
-            eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
-            ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
+            eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+            RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
+            BNXT_NUM_VLANS;
+        bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
+    }
     return 0;
-}

-static inline int
-rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
-                  struct rte_eth_link *link)
-{
-    struct rte_eth_link *dst = &eth_dev->data->dev_link;
-    struct rte_eth_link *src = link;
-
-    if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-                *(uint64_t *)src) == 0)
-        return 1;
-
-    return 0;
+resource_error:
+    PMD_DRV_LOG(ERR,
+            "Insufficient resources to support requested config\n");
+    PMD_DRV_LOG(ERR,
+            "Num Queues Requested: Tx %d, Rx %d\n",
+            eth_dev->data->nb_tx_queues,
+            eth_dev->data->nb_rx_queues);
+    PMD_DRV_LOG(ERR,
+            "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
+            bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
+            bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
+    return -ENOSPC;
 }

 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
@@ -431,110 +706,251 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
     struct rte_eth_link *link = &eth_dev->data->dev_link;

     if (link->link_status)
-        RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
-            (uint8_t)(eth_dev->data->port_id),
+        PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
+            eth_dev->data->port_id,
             (uint32_t)link->link_speed,
             (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
             ("full-duplex") : ("half-duplex"));
     else
-        RTE_LOG(INFO, PMD, "Port %d Link Down\n",
-            (uint8_t)(eth_dev->data->port_id));
+        PMD_DRV_LOG(INFO, "Port %d Link Down\n",
+            eth_dev->data->port_id);
 }

-static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
+/*
+ * Determine whether the current configuration requires support for scattered
+ * receive; return 1 if scattered receive is required and 0 if not.
+ */
+static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
 {
-    bnxt_print_link_info(eth_dev);
+    uint16_t buf_size;
+    int i;
+
+    for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+        struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];
+
+        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+                      RTE_PKTMBUF_HEADROOM);
+        if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
+            return 1;
+    }
     return 0;
 }

+static eth_rx_burst_t
+bnxt_receive_function(__rte_unused struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_ARCH_X86
+#ifndef RTE_LIBRTE_IEEE1588
+    /*
+     * Vector mode receive can be enabled only if scatter rx is not
+     * in use and rx offloads are limited to VLAN stripping and
+     * CRC stripping.
+     */
+    if (!eth_dev->data->scattered_rx &&
+        !(eth_dev->data->dev_conf.rxmode.offloads &
+          ~(DEV_RX_OFFLOAD_VLAN_STRIP |
+        DEV_RX_OFFLOAD_KEEP_CRC |
+        DEV_RX_OFFLOAD_JUMBO_FRAME |
+        DEV_RX_OFFLOAD_IPV4_CKSUM |
+        DEV_RX_OFFLOAD_UDP_CKSUM |
+        DEV_RX_OFFLOAD_TCP_CKSUM |
+        DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+        DEV_RX_OFFLOAD_VLAN_FILTER))) {
+        PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
+                eth_dev->data->port_id);
+        return bnxt_recv_pkts_vec;
+    }
+    PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
+            eth_dev->data->port_id);
+    PMD_DRV_LOG(INFO,
+            "Port %d scatter: %d rx offload: %" PRIX64 "\n",
+            eth_dev->data->port_id,
+            eth_dev->data->scattered_rx,
+            eth_dev->data->dev_conf.rxmode.offloads);
+#endif
+#endif
+    return bnxt_recv_pkts;
+}
+
+static eth_tx_burst_t
+bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
+{
+#ifdef RTE_ARCH_X86
+#ifndef RTE_LIBRTE_IEEE1588
+    /*
+     * Vector mode transmit can be enabled only if not using scatter rx
+     * or tx offloads.
+     */
+    if (!eth_dev->data->scattered_rx &&
+        !eth_dev->data->dev_conf.txmode.offloads) {
+        PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
+                eth_dev->data->port_id);
+        return bnxt_xmit_pkts_vec;
+    }
+    PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
+            eth_dev->data->port_id);
+    PMD_DRV_LOG(INFO,
+            "Port %d scatter: %d tx offload: %" PRIX64 "\n",
+            eth_dev->data->port_id,
+            eth_dev->data->scattered_rx,
+            eth_dev->data->dev_conf.txmode.offloads);
+#endif
+#endif
+    return bnxt_xmit_pkts;
+}
+
+static int bnxt_handle_if_change_status(struct bnxt *bp)
+{
+    int rc;
+
+    /* Since fw has undergone a reset and lost all contexts,
+     * set fatal flag to not issue hwrm during cleanup
+     */
+    bp->flags |= BNXT_FLAG_FATAL_ERROR;
+    bnxt_uninit_resources(bp, true);
+
+    /* clear fatal flag so that re-init happens */
+    bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
+    rc = bnxt_init_resources(bp, true);
+
+    bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
+
+    return rc;
+}
+
 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
+    uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+    int vlan_mask = 0;
     int rc;

-    bp->dev_stopped = 0;
-
-    rc = bnxt_hwrm_func_reset(bp);
-    if (rc) {
-        RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
-        rc = -1;
-        goto error;
+    if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+        PMD_DRV_LOG(ERR,
+            "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
+            bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
     }

-    rc = bnxt_setup_int(bp);
-    if (rc)
-        goto error;
+    bnxt_enable_int(bp);
+    rc = bnxt_hwrm_if_change(bp, 1);
+    if (!rc) {
+        if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
+            rc = bnxt_handle_if_change_status(bp);
+            if (rc)
+                return rc;
+        }
+    }

-    rc = bnxt_alloc_mem(bp);
+    rc = bnxt_init_chip(bp);
     if (rc)
         goto error;

-    rc = bnxt_request_int(bp);
-    if (rc)
-        goto error;
+    eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
+
+    bnxt_link_update_op(eth_dev, 1);

-    rc = bnxt_init_nic(bp);
+    if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+        vlan_mask |= ETH_VLAN_FILTER_MASK;
+    if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+        vlan_mask |= ETH_VLAN_STRIP_MASK;
+    rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
     if (rc)
         goto error;

-    bnxt_enable_int(bp);
+    eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
+    eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);

-    bnxt_link_update_op(eth_dev, 0);
+    bp->flags |= BNXT_FLAG_INIT_DONE;
+    eth_dev->data->dev_started = 1;
+    bp->dev_stopped = 0;
+    bnxt_schedule_fw_health_check(bp);
     return 0;

 error:
+    bnxt_hwrm_if_change(bp, 0);
     bnxt_shutdown_nic(bp);
-    bnxt_disable_int(bp);
-    bnxt_free_int(bp);
     bnxt_free_tx_mbufs(bp);
     bnxt_free_rx_mbufs(bp);
-    bnxt_free_mem(bp);
     return rc;
 }

 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
+    int rc = 0;
+
+    if (!bp->link_info.link_up)
+        rc = bnxt_set_hwrm_link_config(bp, true);
+    if (!rc)
+        eth_dev->data->dev_link.link_status = 1;

-    eth_dev->data->dev_link.link_status = 1;
-    bnxt_set_hwrm_link_config(bp, true);
+    bnxt_print_link_info(eth_dev);
     return 0;
 }

 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;

     eth_dev->data->dev_link.link_status = 0;
     bnxt_set_hwrm_link_config(bp, false);
+    bp->link_info.link_up = 0;
+
     return 0;
 }

 /* Unload the driver, release resources */
 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
+    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+    eth_dev->data->dev_started = 0;
+    /* Prevent crashes when queues are still in use */
+    eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
+    eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
+
+    bnxt_disable_int(bp);
+
+    /* disable uio/vfio intr/eventfd mapping */
+    rte_intr_disable(intr_handle);
+
+    bnxt_cancel_fw_health_check(bp);
+
+    bp->flags &= ~BNXT_FLAG_INIT_DONE;
     if (bp->eth_dev->data->dev_started) {
         /* TBD: STOP HW queues DMA */
         eth_dev->data->dev_link.link_status = 0;
     }
-    bnxt_set_hwrm_link_config(bp, false);
-    bnxt_disable_int(bp);
-    bnxt_free_int(bp);
+    bnxt_dev_set_link_down_op(eth_dev);
+
+    /* Wait for link to be reset and the async notification to process. */
+    rte_delay_ms(BNXT_LINK_WAIT_INTERVAL * 2);
+
+    /* Clean queue intr-vector mapping */
+    rte_intr_efd_disable(intr_handle);
+    if (intr_handle->intr_vec != NULL) {
+        rte_free(intr_handle->intr_vec);
+        intr_handle->intr_vec = NULL;
+    }
+
+    bnxt_hwrm_port_clr_stats(bp);
+    bnxt_free_tx_mbufs(bp);
+    bnxt_free_rx_mbufs(bp);
+    /* Process any remaining notifications in default completion queue */
+    bnxt_int_handler(eth_dev);
     bnxt_shutdown_nic(bp);
+    bnxt_hwrm_if_change(bp, 0);
     bp->dev_stopped = 1;
 }

 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;

     if (bp->dev_stopped == 0)
         bnxt_dev_stop_op(eth_dev);

-    bnxt_free_tx_mbufs(bp);
-    bnxt_free_rx_mbufs(bp);
-    bnxt_free_mem(bp);
     if (eth_dev->data->mac_addrs != NULL) {
         rte_free(eth_dev->data->mac_addrs);
         eth_dev->data->mac_addrs = NULL;
@@ -543,89 +959,111 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
         rte_free(bp->grp_info);
         bp->grp_info = NULL;
     }
+
+    bnxt_dev_uninit(eth_dev);
 }

 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
                     uint32_t index)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
     uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
     struct bnxt_vnic_info *vnic;
     struct bnxt_filter_info *filter, *temp_filter;
-    int i;
+    uint32_t i;
+
+    if (is_bnxt_in_error(bp))
+        return;

     /*
      * Loop through all VNICs from the specified filter flow pools to
      * remove the corresponding MAC addr filter
      */
-    for (i = 0; i < MAX_FF_POOLS; i++) {
+    for (i = 0; i < bp->nr_vnics; i++) {
         if (!(pool_mask & (1ULL << i)))
             continue;

-        STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
-            filter = STAILQ_FIRST(&vnic->filter);
-            while (filter) {
-                temp_filter = STAILQ_NEXT(filter, next);
-                if (filter->mac_index == index) {
-                    STAILQ_REMOVE(&vnic->filter, filter,
-                              bnxt_filter_info, next);
-                    bnxt_hwrm_clear_filter(bp, filter);
-                    filter->mac_index = INVALID_MAC_INDEX;
-                    memset(&filter->l2_addr, 0,
-                           ETHER_ADDR_LEN);
-                    STAILQ_INSERT_TAIL(
-                            &bp->free_filter_list,
-                            filter, next);
-                }
-                filter = temp_filter;
+        vnic = &bp->vnic_info[i];
+        filter = STAILQ_FIRST(&vnic->filter);
+        while (filter) {
+            temp_filter = STAILQ_NEXT(filter, next);
+            if (filter->mac_index == index) {
+                STAILQ_REMOVE(&vnic->filter, filter,
+                          bnxt_filter_info, next);
+                bnxt_hwrm_clear_l2_filter(bp, filter);
+                filter->mac_index = INVALID_MAC_INDEX;
+                memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
+                STAILQ_INSERT_TAIL(&bp->free_filter_list,
+                           filter, next);
             }
+            filter = temp_filter;
         }
     }
 }

-static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
-                 struct ether_addr *mac_addr,
-                 uint32_t index, uint32_t pool)
+static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
+                struct rte_ether_addr *mac_addr,
+                uint32_t index, uint32_t pool)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
-    struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
+    struct bnxt *bp = eth_dev->data->dev_private;
+    struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
     struct bnxt_filter_info *filter;
+    int rc = 0;

-    if (BNXT_VF(bp)) {
-        RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
-        return;
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;
+
+    if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+        PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
+        return -ENOTSUP;
     }

     if (!vnic) {
-        RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
-        return;
+        PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
+        return -EINVAL;
     }
     /* Attach requested MAC address to the new l2_filter */
     STAILQ_FOREACH(filter, &vnic->filter, next) {
         if (filter->mac_index == index) {
-            RTE_LOG(ERR, PMD,
+            PMD_DRV_LOG(ERR,
                 "MAC addr already existed for pool %d\n", pool);
-            return;
+            return 0;
         }
     }
+
     filter = bnxt_alloc_filter(bp);
     if (!filter) {
-        RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
-        return;
+        PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
+        return -ENODEV;
     }

-    STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+
     filter->mac_index = index;
-    memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
-    bnxt_hwrm_set_filter(bp, vnic, filter);
+    memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
+    filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
+
+    rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
+    if (!rc) {
+        STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+    } else {
+        filter->mac_index = INVALID_MAC_INDEX;
+        memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
+        bnxt_free_filter(bp, filter);
+    }
+
+    return rc;
 }

 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
 {
     int rc = 0;
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
     struct rte_eth_link new;
     unsigned int cnt = BNXT_LINK_WAIT_CNT;

+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;
+
     memset(&new, 0, sizeof(new));
     do {
         /* Retrieve link info from hardware */
@@ -633,109 +1071,222 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
         if (rc) {
             new.link_speed = ETH_LINK_SPEED_100M;
             new.link_duplex = ETH_LINK_FULL_DUPLEX;
-            RTE_LOG(ERR, PMD,
-                "Failed to retrieve link rc = 0x%x!", rc);
+            PMD_DRV_LOG(ERR,
                "Failed to retrieve link rc = 0x%x!\n", rc);
             goto out;
         }
-        rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

-        if (!wait_to_complete)
+        if (!wait_to_complete || new.link_status)
             break;
-    } while (!new.link_status && cnt--);
+
+        rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
+    } while (cnt--);

 out:
     /* Timed out or success */
     if (new.link_status != eth_dev->data->dev_link.link_status ||
     new.link_speed != eth_dev->data->dev_link.link_speed) {
-        rte_bnxt_atomic_write_link_status(eth_dev, &new);
+        rte_eth_linkstatus_set(eth_dev, &new);
+
+        _rte_eth_dev_callback_process(eth_dev,
+                          RTE_ETH_EVENT_INTR_LSC,
+                          NULL);
+
+        bnxt_print_link_info(eth_dev);
     }

     return rc;
 }

-static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
+static int
+bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
     struct bnxt_vnic_info *vnic;
+    uint32_t old_flags;
+    int rc;
+
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;

     if (bp->vnic_info == NULL)
-        return;
+        return 0;

-    vnic = &bp->vnic_info[0];
+    vnic = BNXT_GET_DEFAULT_VNIC(bp);
+    old_flags = vnic->flags;
     vnic->flags |= BNXT_VNIC_INFO_PROMISC;
-    bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+    rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+    if (rc != 0)
+        vnic->flags = old_flags;
+
+    return rc;
 }

-static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
+static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
     struct bnxt_vnic_info *vnic;
+    uint32_t old_flags;
+    int rc;
+
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;

     if (bp->vnic_info == NULL)
-        return;
+        return 0;

-    vnic = &bp->vnic_info[0];
+    vnic = BNXT_GET_DEFAULT_VNIC(bp);
+    old_flags = vnic->flags;
     vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
-    bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+    rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+    if (rc != 0)
+        vnic->flags = old_flags;
+
+    return rc;
 }

-static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
+static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
     struct bnxt_vnic_info *vnic;
+    uint32_t old_flags;
+    int rc;
+
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;

     if (bp->vnic_info == NULL)
-        return;
+        return 0;

-    vnic = &bp->vnic_info[0];
+    vnic = BNXT_GET_DEFAULT_VNIC(bp);
+    old_flags = vnic->flags;
     vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
-    bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+    rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+    if (rc != 0)
+        vnic->flags = old_flags;
+
+    return rc;
 }

-static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
+static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
     struct bnxt_vnic_info *vnic;
+    uint32_t old_flags;
+    int rc;
+
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;

     if (bp->vnic_info == NULL)
-        return;
+        return 0;

-    vnic = &bp->vnic_info[0];
+    vnic = BNXT_GET_DEFAULT_VNIC(bp);
+    old_flags = vnic->flags;
     vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
-    bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+    rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+    if (rc != 0)
+        vnic->flags = old_flags;
+
+    return rc;
 }

-static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
+/* Return bnxt_rx_queue pointer corresponding to a given rxq. */
+static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
+{
+    if (qid >= bp->rx_nr_rings)
+        return NULL;
+
+    return bp->eth_dev->data->rx_queues[qid];
+}
+
+/* Return rxq corresponding to a given rss table ring/group ID. */
+static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
+{
+    struct bnxt_rx_queue *rxq;
+    unsigned int i;
+
+    if (!BNXT_HAS_RING_GRPS(bp)) {
+        for (i = 0; i < bp->rx_nr_rings; i++) {
+            rxq = bp->eth_dev->data->rx_queues[i];
+            if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
+                return rxq->index;
+        }
+    } else {
+        for (i = 0; i < bp->rx_nr_rings; i++) {
+            if (bp->grp_info[i].fw_grp_id == fwr)
+                return i;
+        }
+    }
+
+    return INVALID_HW_RING_ID;
+}
+
+static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
                    struct rte_eth_rss_reta_entry64 *reta_conf,
                    uint16_t reta_size)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
     struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
-    struct bnxt_vnic_info *vnic;
-    int i;
+    struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+    uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
+    uint16_t idx, sft;
+    int i, rc;
+
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;
+
+    if (!vnic->rss_table)
+        return -EINVAL;

     if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
         return -EINVAL;

-    if (reta_size != HW_HASH_INDEX_SIZE) {
-        RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+    if (reta_size != tbl_size) {
+        PMD_DRV_LOG(ERR, "The configured hash table lookup size "
                 "(%d) must equal the size supported by the hardware "
-                "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+                "(%d)\n", reta_size, tbl_size);
         return -EINVAL;
     }
-    /* Update the RSS VNIC(s) */
-    for (i = 0; i < MAX_FF_POOLS; i++) {
-        STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
-            memcpy(vnic->rss_table, reta_conf, reta_size);
-            bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+
+    for (i = 0; i < reta_size; i++) {
+        struct bnxt_rx_queue *rxq;
+
+        idx = i / RTE_RETA_GROUP_SIZE;
+        sft = i % RTE_RETA_GROUP_SIZE;
+
+        if (!(reta_conf[idx].mask & (1ULL << sft)))
+            continue;
+
+        rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
+        if (!rxq) {
+            PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
+            return -EINVAL;
         }
+
+        if (BNXT_CHIP_THOR(bp)) {
+            vnic->rss_table[i * 2] =
+                rxq->rx_ring->rx_ring_struct->fw_ring_id;
+            vnic->rss_table[i * 2 + 1] =
+                rxq->cp_ring->cp_ring_struct->fw_ring_id;
+        } else {
+            vnic->rss_table[i] =
+                vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
+        }
     }
+
+    bnxt_hwrm_vnic_rss_cfg(bp, vnic);
     return 0;
 }

@@ -743,10 +1294,15 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
                   struct rte_eth_rss_reta_entry64 *reta_conf,
                   uint16_t reta_size)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
     struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
-    struct rte_intr_handle *intr_handle
-        = &bp->pdev->intr_handle;
+    uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
+    uint16_t idx, sft, i;
+    int rc;
+
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;

     /* Retrieve from the default VNIC */
     if (!vnic)
@@ -754,18 +1310,32 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
     if (!vnic->rss_table)
         return -EINVAL;

-    if (reta_size != HW_HASH_INDEX_SIZE) {
-        RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+    if (reta_size != tbl_size) {
+        PMD_DRV_LOG(ERR, "The configured hash table lookup size "
                 "(%d) must equal the size supported by the hardware "
-                "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+                "(%d)\n", reta_size, tbl_size);
         return -EINVAL;
     }
-    /* EW - need to revisit here copying from u64 to u16 */
-    memcpy(reta_conf, vnic->rss_table, reta_size);

-    if (rte_intr_allow_others(intr_handle)) {
-        if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
-            bnxt_dev_lsc_intr_setup(eth_dev);
+    for (idx = 0, i = 0; i < reta_size; i++) {
+        idx = i / RTE_RETA_GROUP_SIZE;
+        sft = i % RTE_RETA_GROUP_SIZE;
+
+        if (reta_conf[idx].mask & (1ULL << sft)) {
+            uint16_t qid;
+
+            if (BNXT_CHIP_THOR(bp))
+                qid = bnxt_rss_to_qid(bp,
+                              vnic->rss_table[i * 2]);
+            else
+                qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
+
+            if (qid == INVALID_HW_RING_ID) {
+                PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
+                return -EINVAL;
+            }
+
+            reta_conf[idx].reta[sft] = qid;
+        }
     }

     return 0;
@@ -774,11 +1344,14 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
                    struct rte_eth_rss_conf *rss_conf)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
     struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
     struct bnxt_vnic_info *vnic;
-    uint16_t hash_type = 0;
-    int i;
+    int rc;
+
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;

     /*
      * If RSS enablement were different than dev_configure,
      */
@@ -786,52 +1359,50 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
     if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
         if (!rss_conf->rss_hf)
-            return -EINVAL;
+            PMD_DRV_LOG(ERR, "Hash type NONE\n");
     } else {
         if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
             return -EINVAL;
     }

-    if (rss_conf->rss_hf & ETH_RSS_IPV4)
-        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
-    if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
-        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
-    if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
-        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
-    if (rss_conf->rss_hf & ETH_RSS_IPV6)
-        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
-    if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
-        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
-    if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
-        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
-
-    /* Update the RSS VNIC(s) */
-    for (i = 0; i < MAX_FF_POOLS; i++) {
-        STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
-            vnic->hash_type = hash_type;
-
-            /*
-             * Use the supplied key if the key length is
-             * acceptable and the rss_key is not NULL
-             */
-            if (rss_conf->rss_key &&
-                rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
-                memcpy(vnic->rss_hash_key, rss_conf->rss_key,
-                       rss_conf->rss_key_len);
-
-            bnxt_hwrm_vnic_rss_cfg(bp, vnic);
-        }
+
+    bp->flags |= BNXT_FLAG_UPDATE_HASH;
+    memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));
+
+    /* Update the default RSS VNIC(s) */
+    vnic = &bp->vnic_info[0];
+    vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
+
+    /*
+     * If hashkey is not specified, use the previously configured
+     * hashkey
+     */
+    if (!rss_conf->rss_key)
+        goto rss_config;
+
+    if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
+        PMD_DRV_LOG(ERR,
+                "Invalid hashkey length, should be 16 bytes\n");
+        return -EINVAL;
     }
+    memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
+
+rss_config:
+    bnxt_hwrm_vnic_rss_cfg(bp, vnic);
     return 0;
 }

 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
                      struct rte_eth_rss_conf *rss_conf)
 {
-    struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+    struct bnxt *bp = eth_dev->data->dev_private;
     struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
-    int len;
+    int len, rc;
     uint32_t hash_types;

+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;
+
     /* RSS configuration is the same for all VNICs */
     if (vnic && vnic->rss_hash_key) {
         if (rss_conf->rss_key) {
@@ -871,7 +1442,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
         }
         if (hash_types) {
-            RTE_LOG(ERR, PMD,
+            PMD_DRV_LOG(ERR,
                 "Unknown RSS config from firmware (%08x), RSS disabled",
                 vnic->hash_type);
             return -ENOTSUP;
@@ -883,12 +1454,16 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
 }

 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
-                 struct rte_eth_fc_conf *fc_conf __rte_unused)
+                 struct rte_eth_fc_conf *fc_conf)
 {
-    struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+    struct bnxt *bp = dev->data->dev_private;
     struct rte_eth_link link_info;
     int rc;

+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;
+
     rc = bnxt_get_hwrm_link_config(bp, &link_info);
     if (rc)
         return rc;
@@ -917,10 +1492,15 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
                  struct rte_eth_fc_conf *fc_conf)
 {
-    struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+    struct bnxt *bp = dev->data->dev_private;
+    int rc;
+
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;

-    if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
-        RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
+    if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
+        PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
         return -ENOTSUP;
     }

@@ -968,215 +1548,3202 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
     return bnxt_set_hwrm_link_config(bp, true);
 }

-/*
- * Initialization
- */
+/* Add UDP tunneling port */
+static int
+bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
+                struct rte_eth_udp_tunnel *udp_tunnel)
+{
+    struct bnxt *bp = eth_dev->data->dev_private;
+    uint16_t tunnel_type = 0;
+    int rc = 0;

-static struct eth_dev_ops bnxt_dev_ops = {
-    .dev_infos_get = bnxt_dev_info_get_op,
-    .dev_close = bnxt_dev_close_op,
-    .dev_configure = bnxt_dev_configure_op,
-    .dev_start = bnxt_dev_start_op,
-    .dev_stop = bnxt_dev_stop_op,
-    .dev_set_link_up = bnxt_dev_set_link_up_op,
-    .dev_set_link_down = bnxt_dev_set_link_down_op,
-    .stats_get = bnxt_stats_get_op,
-    .stats_reset = bnxt_stats_reset_op,
-    .rx_queue_setup = bnxt_rx_queue_setup_op,
-    .rx_queue_release = bnxt_rx_queue_release_op,
-    .tx_queue_setup = bnxt_tx_queue_setup_op,
-    .tx_queue_release = bnxt_tx_queue_release_op,
-    .reta_update = bnxt_reta_update_op,
-    .reta_query = bnxt_reta_query_op,
-    .rss_hash_update = bnxt_rss_hash_update_op,
-    .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
-    .link_update = bnxt_link_update_op,
-    .promiscuous_enable = bnxt_promiscuous_enable_op,
-    .promiscuous_disable = bnxt_promiscuous_disable_op,
-    .allmulticast_enable = bnxt_allmulticast_enable_op,
-    .allmulticast_disable = bnxt_allmulticast_disable_op,
-    .mac_addr_add = bnxt_mac_addr_add_op,
-    .mac_addr_remove = bnxt_mac_addr_remove_op,
-    .flow_ctrl_get = bnxt_flow_ctrl_get_op,
-    .flow_ctrl_set = bnxt_flow_ctrl_set_op,
-};
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;

-static bool bnxt_vf_pciid(uint16_t id)
-{
-    if (id == BROADCOM_DEV_ID_57304_VF ||
-        id == BROADCOM_DEV_ID_57406_VF ||
-        id == BROADCOM_DEV_ID_5731X_VF ||
-        id == BROADCOM_DEV_ID_5741X_VF)
-        return true;
-    return false;
+    switch (udp_tunnel->prot_type) {
+    case RTE_TUNNEL_TYPE_VXLAN:
+        if (bp->vxlan_port_cnt) {
+            PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
+                udp_tunnel->udp_port);
+            if (bp->vxlan_port != udp_tunnel->udp_port) {
+                PMD_DRV_LOG(ERR, "Only one port allowed\n");
+                return -ENOSPC;
+            }
+            bp->vxlan_port_cnt++;
+            return 0;
+        }
+        tunnel_type =
+            HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
+        bp->vxlan_port_cnt++;
+        break;
+    case RTE_TUNNEL_TYPE_GENEVE:
+        if (bp->geneve_port_cnt) {
+            PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
+                udp_tunnel->udp_port);
+            if (bp->geneve_port != udp_tunnel->udp_port) {
+                PMD_DRV_LOG(ERR, "Only one port allowed\n");
+                return -ENOSPC;
+            }
+            bp->geneve_port_cnt++;
+            return 0;
+        }
+        tunnel_type =
+            HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
+        bp->geneve_port_cnt++;
+        break;
+    default:
+        PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
+        return -ENOTSUP;
+    }
+    rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
+                         tunnel_type);
+    return rc;
 }

-static int bnxt_init_board(struct rte_eth_dev *eth_dev)
+static int
+bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
+                struct rte_eth_udp_tunnel *udp_tunnel)
 {
     struct bnxt *bp = eth_dev->data->dev_private;
-    struct rte_pci_device *pci_dev = eth_dev->pci_dev;
-    int rc;
-
-    /* enable device (incl. PCI PM wakeup), and bus-mastering */
-    if (!pci_dev->mem_resource[0].addr) {
-        RTE_LOG(ERR, PMD,
-            "Cannot find PCI device base address, aborting\n");
-        rc = -ENODEV;
-        goto init_err_disable;
-    }
+    uint16_t tunnel_type = 0;
+    uint16_t port = 0;
+    int rc = 0;

-    bp->eth_dev = eth_dev;
-    bp->pdev = pci_dev;
+    rc = is_bnxt_in_error(bp);
+    if (rc)
+        return rc;

-    bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
-    if (!bp->bar0) {
-        RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
-        rc = -ENOMEM;
-        goto init_err_release;
-    }
-    return 0;
+    switch (udp_tunnel->prot_type) {
+    case RTE_TUNNEL_TYPE_VXLAN:
+        if (!bp->vxlan_port_cnt) {
+            PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
+            return -EINVAL;
+        }
+        if (bp->vxlan_port != udp_tunnel->udp_port) {
+            PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
+                udp_tunnel->udp_port, bp->vxlan_port);
+            return -EINVAL;
+        }
+        if (--bp->vxlan_port_cnt)
+            return 0;

-init_err_release:
-    if (bp->bar0)
-        bp->bar0 = NULL;
+        tunnel_type =
+            HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
+        port = bp->vxlan_fw_dst_port_id;
+        break;
+    case RTE_TUNNEL_TYPE_GENEVE:
+        if (!bp->geneve_port_cnt) {
+            PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
+            return -EINVAL;
+        }
+        if (bp->geneve_port != udp_tunnel->udp_port) {
Configured port: %d\n", + udp_tunnel->udp_port, bp->geneve_port); + return -EINVAL; + } + if (--bp->geneve_port_cnt) + return 0; -init_err_disable: + tunnel_type = + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; + port = bp->geneve_fw_dst_port_id; + break; + default: + PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); + return -ENOTSUP; + } + rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); + if (!rc) { + if (tunnel_type == + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) + bp->vxlan_port = 0; + if (tunnel_type == + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) + bp->geneve_port = 0; + } return rc; } -static int -bnxt_dev_init(struct rte_eth_dev *eth_dev) +static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) { - struct rte_pci_device *pci_dev = eth_dev->pci_dev; - static int version_printed; - struct bnxt *bp; - int rc; + struct bnxt_filter_info *filter; + struct bnxt_vnic_info *vnic; + int rc = 0; + uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; - if (version_printed++ == 0) - RTE_LOG(INFO, PMD, "%s", bnxt_version); + /* if VLAN exists && VLAN matches vlan_id + * remove the MAC+VLAN filter + * add a new MAC only filter + * else + * VLAN filter doesn't exist, just skip and continue + */ + vnic = BNXT_GET_DEFAULT_VNIC(bp); + filter = STAILQ_FIRST(&vnic->filter); + while (filter) { + /* Search for this matching MAC+VLAN filter */ + if (filter->enables & chk && filter->l2_ivlan == vlan_id && + !memcmp(filter->l2_addr, + bp->mac_addr, + RTE_ETHER_ADDR_LEN)) { + /* Delete the filter */ + rc = bnxt_hwrm_clear_l2_filter(bp, filter); + if (rc) + return rc; + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next); + + PMD_DRV_LOG(INFO, + "Del Vlan filter for %d\n", + vlan_id); + return rc; + } + filter = STAILQ_NEXT(filter, next); + } + return -ENOENT; +} - rte_eth_copy_pci_info(eth_dev, pci_dev); - bp = eth_dev->data->dev_private; +static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) +{ + struct bnxt_filter_info *filter; + struct bnxt_vnic_info *vnic; + int rc = 0; + uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; + uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; + + /* Implementation notes on the use of VNIC in this command: + * + * By default, these filters belong to default vnic for the function. + * Once these filters are set up, only destination VNIC can be modified. + * If the destination VNIC is not specified in this command, + * then the HWRM shall only create an l2 context id. + */ - if (bnxt_vf_pciid(pci_dev->id.device_id)) - bp->flags |= BNXT_FLAG_VF; + vnic = BNXT_GET_DEFAULT_VNIC(bp); + filter = STAILQ_FIRST(&vnic->filter); + /* Check if the VLAN has already been added */ + while (filter) { + if (filter->enables & chk && filter->l2_ivlan == vlan_id && + !memcmp(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN)) + return -EEXIST; - rc = bnxt_init_board(eth_dev); - if (rc) { - RTE_LOG(ERR, PMD, - "Board initialization failed rc: %x\n", rc); - goto error; + filter = STAILQ_NEXT(filter, next); } - eth_dev->dev_ops = &bnxt_dev_ops; - eth_dev->rx_pkt_burst = &bnxt_recv_pkts; - eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; - rc = bnxt_alloc_hwrm_resources(bp); + /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC + * command to create MAC+VLAN filter with the right flags, enables set. 
-	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
-	bp = eth_dev->data->dev_private;
+static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
+{
+	struct bnxt_filter_info *filter;
+	struct bnxt_vnic_info *vnic;
+	int rc = 0;
+	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
+		      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
+	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
+
+	/* Implementation notes on the use of VNIC in this command:
+	 *
+	 * By default, these filters belong to default vnic for the function.
+	 * Once these filters are set up, only destination VNIC can be modified.
+	 * If the destination VNIC is not specified in this command,
+	 * then the HWRM shall only create an l2 context id.
+	 */
 
-	if (bnxt_vf_pciid(pci_dev->id.device_id))
-		bp->flags |= BNXT_FLAG_VF;
+	vnic = BNXT_GET_DEFAULT_VNIC(bp);
+	filter = STAILQ_FIRST(&vnic->filter);
+	/* Check if the VLAN has already been added */
+	while (filter) {
+		if (filter->enables & chk && filter->l2_ivlan == vlan_id &&
+		    !memcmp(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN))
+			return -EEXIST;
 
-	rc = bnxt_init_board(eth_dev);
-	if (rc) {
-		RTE_LOG(ERR, PMD,
-			"Board initialization failed rc: %x\n", rc);
-		goto error;
+		filter = STAILQ_NEXT(filter, next);
 	}
-	eth_dev->dev_ops = &bnxt_dev_ops;
-	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
-	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
-	rc = bnxt_alloc_hwrm_resources(bp);
+
+	/* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC
+	 * command to create MAC+VLAN filter with the right flags, enables set.
+	 */
+	filter = bnxt_alloc_filter(bp);
+	if (!filter) {
+		PMD_DRV_LOG(ERR,
+			    "MAC/VLAN filter alloc failed\n");
+		return -ENOMEM;
+	}
+	/* MAC + VLAN ID filter */
+	filter->l2_ivlan = vlan_id;
+	filter->l2_ivlan_mask = 0x0FFF;
+	filter->enables |= en;
+	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
+	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
 	if (rc) {
-		RTE_LOG(ERR, PMD,
-			"hwrm resource allocation failure rc: %x\n", rc);
-		goto error_free;
+		/* Free the newly allocated filter as we were
+		 * not able to create the filter in hardware.
+		 */
+		filter->fw_l2_filter_id = UINT64_MAX;
+		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
+		return rc;
 	}
-	rc = bnxt_hwrm_ver_get(bp);
-	if (rc)
-		goto error_free;
-	bnxt_hwrm_queue_qportcfg(bp);
-	bnxt_hwrm_func_qcfg(bp);
 
+	/* Add this new filter to the list */
+	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+	PMD_DRV_LOG(INFO,
+		    "Added Vlan filter for %d\n", vlan_id);
+	return rc;
+}
+
+static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
+				   uint16_t vlan_id, int on)
+{
+	struct bnxt *bp = eth_dev->data->dev_private;
+	int rc;
 
-	/* Get the MAX capabilities for this function */
-	rc = bnxt_hwrm_func_qcaps(bp);
-	if (rc) {
-		RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
-		goto error_free;
-	}
-	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
-					ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
-	if (eth_dev->data->mac_addrs == NULL) {
-		RTE_LOG(ERR, PMD,
-			"Failed to alloc %u bytes needed to store MAC addr tbl",
-			ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
-		rc = -ENOMEM;
-		goto error_free;
-	}
-	/* Copy the permanent MAC from the qcap response address now. */
-	if (BNXT_PF(bp))
-		memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr));
+	rc = is_bnxt_in_error(bp);
+	if (rc)
+		return rc;
+
+	/* These operations apply to ALL existing MAC/VLAN filters */
+	if (on)
+		return bnxt_add_vlan_filter(bp, vlan_id);
 	else
-		memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr));
-	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
-	bp->grp_info = rte_zmalloc("bnxt_grp_info",
-				sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
-	if (!bp->grp_info) {
-		RTE_LOG(ERR, PMD,
-			"Failed to alloc %zu bytes needed to store group info table\n",
-			sizeof(*bp->grp_info) * bp->max_ring_grps);
-		rc = -ENOMEM;
-		goto error_free;
-	}
+		return bnxt_del_vlan_filter(bp, vlan_id);
+}
 
-	rc = bnxt_hwrm_func_driver_register(bp, 0,
-					    bp->pf.vf_req_fwd);
-	if (rc) {
-		RTE_LOG(ERR, PMD,
-			"Failed to register driver");
-		rc = -EBUSY;
-		goto error_free;
+static int
+bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
+{
+	struct bnxt *bp = dev->data->dev_private;
+	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	unsigned int i;
+	int rc;
+
+	rc = is_bnxt_in_error(bp);
+	if (rc)
+		return rc;
+
+	if (mask & ETH_VLAN_FILTER_MASK) {
+		if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
+			/* Remove any VLAN filters programmed */
+			for (i = 0; i < 4095; i++)
+				bnxt_del_vlan_filter(bp, i);
+		}
+		PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
+			    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
 	}
-	RTE_LOG(INFO, PMD,
-		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
-		pci_dev->mem_resource[0].phys_addr,
-		pci_dev->mem_resource[0].addr);
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		/* Enable or disable VLAN stripping */
+		for (i = 0; i < bp->nr_vnics; i++) {
+			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+			if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+				vnic->vlan_strip = true;
+			else
vnic->vlan_strip = false; + bnxt_hwrm_vnic_cfg(bp, vnic); + } + PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", + !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); + } - bp->dev_stopped = 0; + if (mask & ETH_VLAN_EXTEND_MASK) { + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); + else + PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); + } return 0; - -error_free: - eth_dev->driver->eth_dev_uninit(eth_dev); -error: - return rc; } static int -bnxt_dev_uninit(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = eth_dev->data->dev_private; - int rc; - - if (eth_dev->data->mac_addrs != NULL) { - rte_free(eth_dev->data->mac_addrs); - eth_dev->data->mac_addrs = NULL; +bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct bnxt *bp = dev->data->dev_private; + int qinq = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND; + + if (vlan_type != ETH_VLAN_TYPE_INNER && + vlan_type != ETH_VLAN_TYPE_OUTER) { + PMD_DRV_LOG(ERR, + "Unsupported vlan type."); + return -EINVAL; } - if (bp->grp_info != NULL) { - rte_free(bp->grp_info); - bp->grp_info = NULL; + if (!qinq) { + PMD_DRV_LOG(ERR, + "QinQ not enabled. Needs to be ON as we can " + "accelerate only outer vlan\n"); + return -EINVAL; } - rc = bnxt_hwrm_func_driver_unregister(bp, 0); - bnxt_free_hwrm_resources(bp); - if (bp->dev_stopped == 0) - bnxt_dev_close_op(eth_dev); - eth_dev->dev_ops = NULL; - eth_dev->rx_pkt_burst = NULL; - eth_dev->tx_pkt_burst = NULL; - return rc; -} + if (vlan_type == ETH_VLAN_TYPE_OUTER) { + switch (tpid) { + case RTE_ETHER_TYPE_QINQ: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; + break; + case RTE_ETHER_TYPE_VLAN: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; + break; + case 0x9100: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; + break; + case 0x9200: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; + break; + case 0x9300: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; + break; + default: + PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); + return -EINVAL; + } + bp->outer_tpid_bd |= tpid; + PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); + } else if (vlan_type == ETH_VLAN_TYPE_INNER) { + PMD_DRV_LOG(ERR, + "Can accelerate only outer vlan in QinQ\n"); + return -EINVAL; + } -static struct eth_driver bnxt_rte_pmd = { - .pci_drv = { - .id_table = bnxt_pci_id_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | - RTE_PCI_DRV_DETACHABLE | RTE_PCI_DRV_INTR_LSC, - .probe = rte_eth_dev_pci_probe, - .remove = rte_eth_dev_pci_remove - }, - .eth_dev_init = bnxt_dev_init, - .eth_dev_uninit = bnxt_dev_uninit, - .dev_private_size = sizeof(struct bnxt), + return 0; +} + +static int +bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + struct bnxt *bp = dev->data->dev_private; + /* Default Filter is tied to VNIC 0 */ + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct bnxt_filter_info *filter; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) + return -EPERM; + + if (rte_is_zero_ether_addr(addr)) + return -EINVAL; + + STAILQ_FOREACH(filter, &vnic->filter, next) { + /* Default Filter is at Index 0 */ + if (filter->mac_index != 0) + continue; + + memcpy(filter->l2_addr, addr, RTE_ETHER_ADDR_LEN); + memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); + filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX | + 
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
+		filter->enables |=
+			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
+
+		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
+		if (rc) {
+			memcpy(filter->l2_addr, bp->mac_addr,
+			       RTE_ETHER_ADDR_LEN);
+			return rc;
+		}
+
+		memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
+		PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
+		return 0;
+	}
+
+	return 0;
+}
+
+static int
+bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
+			     struct rte_ether_addr *mc_addr_set,
+			     uint32_t nb_mc_addr)
+{
+	struct bnxt *bp = eth_dev->data->dev_private;
+	char *mc_addr_list = (char *)mc_addr_set;
+	struct bnxt_vnic_info *vnic;
+	uint32_t off = 0, i = 0;
+	int rc;
+
+	rc = is_bnxt_in_error(bp);
+	if (rc)
+		return rc;
+
+	vnic = BNXT_GET_DEFAULT_VNIC(bp);
+
+	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
+		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
+		goto allmulti;
+	}
+
+	/* TODO Check for Duplicate mcast addresses */
+	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
+	for (i = 0; i < nb_mc_addr; i++) {
+		memcpy(vnic->mc_list + off, &mc_addr_list[i],
+		       RTE_ETHER_ADDR_LEN);
+		off += RTE_ETHER_ADDR_LEN;
+	}
+
+	vnic->mc_addr_cnt = i;
+
+allmulti:
+	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
+static int
+bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+	struct bnxt *bp = dev->data->dev_private;
+	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
+	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
+	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
+	int ret;
+
+	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
+		       fw_major, fw_minor, fw_updt);
+
+	ret += 1; /* add the size of '\0' */
+	if (fw_size < (uint32_t)ret)
+		return ret;
+	else
+		return 0;
+}
+
+static void
+bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
+		     struct rte_eth_rxq_info *qinfo)
+{
+	struct bnxt_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = 0;
+	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+static void
+bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
+		     struct rte_eth_txq_info *qinfo)
+{
+	struct bnxt_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.tx_rs_thresh = 0;
+	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
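The two info ops above are surfaced through rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get(); a typical caller looks like this sketch (port and queue ids are illustrative, not part of this patch):

	uint16_t port_id = 0;			/* illustrative */
	struct rte_eth_txq_info txq_info;

	/* Filled in by bnxt_txq_info_get_op() above */
	if (rte_eth_tx_queue_info_get(port_id, 0, &txq_info) == 0)
		printf("txq0: %u descriptors, tx_free_thresh %u\n",
		       txq_info.nb_desc, txq_info.conf.tx_free_thresh);
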
"); + PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); + return -EINVAL; + } +#endif + + if (new_mtu > RTE_ETHER_MTU) { + bp->flags |= BNXT_FLAG_JUMBO; + bp->eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + } else { + bp->eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + bp->flags &= ~BNXT_FLAG_JUMBO; + } + + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size; + + for (i = 0; i < bp->nr_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + uint16_t size = 0; + + vnic->mru = new_mtu + RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; + rc = bnxt_hwrm_vnic_cfg(bp, vnic); + if (rc) + break; + + size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); + size -= RTE_PKTMBUF_HEADROOM; + + if (size < new_mtu) { + rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); + if (rc) + return rc; + } + } + + PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); + + return rc; +} + +static int +bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) +{ + struct bnxt *bp = dev->data->dev_private; + uint16_t vlan = bp->vlan; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { + PMD_DRV_LOG(ERR, + "PVID cannot be modified for this function\n"); + return -ENOTSUP; + } + bp->vlan = on ? pvid : 0; + + rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); + if (rc) + bp->vlan = vlan; + return rc; +} + +static int +bnxt_dev_led_on_op(struct rte_eth_dev *dev) +{ + struct bnxt *bp = dev->data->dev_private; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + return bnxt_hwrm_port_led_cfg(bp, true); +} + +static int +bnxt_dev_led_off_op(struct rte_eth_dev *dev) +{ + struct bnxt *bp = dev->data->dev_private; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + return bnxt_hwrm_port_led_cfg(bp, false); +} + +static uint32_t +bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + uint32_t desc = 0, raw_cons = 0, cons; + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_queue *rxq; + struct rx_pkt_cmpl *rxcmp; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + rxq = dev->data->rx_queues[rx_queue_id]; + cpr = rxq->cp_ring; + raw_cons = cpr->cp_raw_cons; + + while (1) { + cons = RING_CMP(cpr->cp_ring_struct, raw_cons); + rte_prefetch0(&cpr->cp_desc_ring[cons]); + rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; + + if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) { + break; + } else { + raw_cons++; + desc++; + } + } + + return desc; +} + +static int +bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) +{ + struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; + struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr; + struct bnxt_sw_rx_bd *rx_buf; + struct rx_pkt_cmpl *rxcmp; + uint32_t cons, cp_cons; + int rc; + + if (!rxq) + return -EINVAL; + + rc = is_bnxt_in_error(rxq->bp); + if (rc) + return rc; + + cpr = rxq->cp_ring; + rxr = rxq->rx_ring; + + if (offset >= rxq->nb_rx_desc) + return -EINVAL; + + cons = RING_CMP(cpr->cp_ring_struct, offset); + cp_cons = cpr->cp_raw_cons; + rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; + + if (cons > cp_cons) { + if (CMPL_VALID(rxcmp, cpr->valid)) + return RTE_ETH_RX_DESC_DONE; + } else { + if (CMPL_VALID(rxcmp, !cpr->valid)) + return RTE_ETH_RX_DESC_DONE; + } + rx_buf = &rxr->rx_buf_ring[cons]; + if (rx_buf->mbuf == NULL) + return RTE_ETH_RX_DESC_UNAVAIL; + + + return 
+static uint32_t
+bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	uint32_t desc = 0, raw_cons = 0, cons;
+	struct bnxt_cp_ring_info *cpr;
+	struct bnxt_rx_queue *rxq;
+	struct rx_pkt_cmpl *rxcmp;
+	int rc;
+
+	rc = is_bnxt_in_error(bp);
+	if (rc)
+		return rc;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	cpr = rxq->cp_ring;
+	raw_cons = cpr->cp_raw_cons;
+
+	while (1) {
+		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+		rte_prefetch0(&cpr->cp_desc_ring[cons]);
+		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) {
+			break;
+		} else {
+			raw_cons++;
+			desc++;
+		}
+	}
+
+	return desc;
+}
+
+static int
+bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
+{
+	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_cp_ring_info *cpr;
+	struct bnxt_sw_rx_bd *rx_buf;
+	struct rx_pkt_cmpl *rxcmp;
+	uint32_t cons, cp_cons;
+	int rc;
+
+	if (!rxq)
+		return -EINVAL;
+
+	rc = is_bnxt_in_error(rxq->bp);
+	if (rc)
+		return rc;
+
+	cpr = rxq->cp_ring;
+	rxr = rxq->rx_ring;
+
+	if (offset >= rxq->nb_rx_desc)
+		return -EINVAL;
+
+	cons = RING_CMP(cpr->cp_ring_struct, offset);
+	cp_cons = cpr->cp_raw_cons;
+	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+	if (cons > cp_cons) {
+		if (CMPL_VALID(rxcmp, cpr->valid))
+			return RTE_ETH_RX_DESC_DONE;
+	} else {
+		if (CMPL_VALID(rxcmp, !cpr->valid))
+			return RTE_ETH_RX_DESC_DONE;
+	}
+	rx_buf = &rxr->rx_buf_ring[cons];
+	if (rx_buf->mbuf == NULL)
+		return RTE_ETH_RX_DESC_UNAVAIL;
+
+	return RTE_ETH_RX_DESC_AVAIL;
+}
+
+static int
+bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
+{
+	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
+	struct bnxt_tx_ring_info *txr;
+	struct bnxt_cp_ring_info *cpr;
+	struct bnxt_sw_tx_bd *tx_buf;
+	struct tx_pkt_cmpl *txcmp;
+	uint32_t cons, cp_cons;
+	int rc;
+
+	if (!txq)
+		return -EINVAL;
+
+	rc = is_bnxt_in_error(txq->bp);
+	if (rc)
+		return rc;
+
+	cpr = txq->cp_ring;
+	txr = txq->tx_ring;
+
+	if (offset >= txq->nb_tx_desc)
+		return -EINVAL;
+
+	cons = RING_CMP(cpr->cp_ring_struct, offset);
+	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+	cp_cons = cpr->cp_raw_cons;
+
+	if (cons > cp_cons) {
+		if (CMPL_VALID(txcmp, cpr->valid))
+			return RTE_ETH_TX_DESC_UNAVAIL;
+	} else {
+		if (CMPL_VALID(txcmp, !cpr->valid))
+			return RTE_ETH_TX_DESC_UNAVAIL;
+	}
+	tx_buf = &txr->tx_buf_ring[cons];
+	if (tx_buf->mbuf == NULL)
+		return RTE_ETH_TX_DESC_DONE;
+
+	return RTE_ETH_TX_DESC_FULL;
+}
+
+static struct bnxt_filter_info *
+bnxt_match_and_validate_ether_filter(struct bnxt *bp,
+				     struct rte_eth_ethertype_filter *efilter,
+				     struct bnxt_vnic_info *vnic0,
+				     struct bnxt_vnic_info *vnic,
+				     int *ret)
+{
+	struct bnxt_filter_info *mfilter = NULL;
+	int match = 0;
+	*ret = 0;
+
+	if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+	    efilter->ether_type == RTE_ETHER_TYPE_IPV6) {
+		PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
+			    " ethertype filter.", efilter->ether_type);
+		*ret = -EINVAL;
+		goto exit;
+	}
+	if (efilter->queue >= bp->rx_nr_rings) {
+		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
+		*ret = -EINVAL;
+		goto exit;
+	}
+
+	vnic0 = &bp->vnic_info[0];
+	vnic = &bp->vnic_info[efilter->queue];
+	if (vnic == NULL) {
+		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
+		*ret = -EINVAL;
+		goto exit;
+	}
+
+	if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+		STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
+			if ((!memcmp(efilter->mac_addr.addr_bytes,
+				     mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
+			     mfilter->flags ==
+			     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
+			     mfilter->ethertype == efilter->ether_type)) {
+				match = 1;
+				break;
+			}
+		}
+	} else {
+		STAILQ_FOREACH(mfilter, &vnic->filter, next)
+			if ((!memcmp(efilter->mac_addr.addr_bytes,
+				     mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
+			     mfilter->ethertype == efilter->ether_type &&
+			     mfilter->flags ==
+			     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
+				match = 1;
+				break;
+			}
+	}
+
+	if (match)
+		*ret = -EEXIST;
+
+exit:
+	return mfilter;
+}
+
+static int
+bnxt_ethertype_filter(struct rte_eth_dev *dev,
+		      enum rte_filter_op filter_op,
+		      void *arg)
+{
+	struct bnxt *bp = dev->data->dev_private;
+	struct rte_eth_ethertype_filter *efilter =
+			(struct rte_eth_ethertype_filter *)arg;
+	struct bnxt_filter_info *bfilter, *filter1;
+	struct bnxt_vnic_info *vnic, *vnic0;
+	int ret;
+
+	if (filter_op == RTE_ETH_FILTER_NOP)
+		return 0;
+
+	if (arg == NULL) {
+		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+			    filter_op);
+		return -EINVAL;
+	}
+
+	vnic0 = &bp->vnic_info[0];
+	vnic = &bp->vnic_info[efilter->queue];
+
+	switch (filter_op) {
+	case RTE_ETH_FILTER_ADD:
+		bnxt_match_and_validate_ether_filter(bp, efilter,
+						     vnic0, vnic, &ret);
+		if (ret < 0)
+			return ret;
+
+		bfilter = bnxt_get_unused_filter(bp);
+		if (bfilter == NULL) {
+			PMD_DRV_LOG(ERR,
+				    "Not enough resources for a new filter.\n");
+			return -ENOMEM;
+		}
+		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
+		memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
+		       RTE_ETHER_ADDR_LEN);
memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes, + RTE_ETHER_ADDR_LEN); + bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; + bfilter->ethertype = efilter->ether_type; + bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + + filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0); + if (filter1 == NULL) { + ret = -EINVAL; + goto cleanup; + } + bfilter->enables |= + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; + + bfilter->dst_id = vnic->fw_vnic_id; + + if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { + bfilter->flags = + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; + } + + ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); + if (ret) + goto cleanup; + STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); + break; + case RTE_ETH_FILTER_DELETE: + filter1 = bnxt_match_and_validate_ether_filter(bp, efilter, + vnic0, vnic, &ret); + if (ret == -EEXIST) { + ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1); + + STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info, + next); + bnxt_free_filter(bp, filter1); + } else if (ret == 0) { + PMD_DRV_LOG(ERR, "No matching filter found\n"); + } + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + goto error; + } + return ret; +cleanup: + bnxt_free_filter(bp, bfilter); +error: + return ret; +} + +static inline int +parse_ntuple_filter(struct bnxt *bp, + struct rte_eth_ntuple_filter *nfilter, + struct bnxt_filter_info *bfilter) +{ + uint32_t en = 0; + + if (nfilter->queue >= bp->rx_nr_rings) { + PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue); + return -EINVAL; + } + + switch (nfilter->dst_port_mask) { + case UINT16_MAX: + bfilter->dst_port_mask = -1; + bfilter->dst_port = nfilter->dst_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + + switch (nfilter->proto_mask) { + case UINT8_MAX: + if (nfilter->proto == 17) /* IPPROTO_UDP */ + bfilter->ip_protocol = 17; + else if (nfilter->proto == 6) /* IPPROTO_TCP */ + bfilter->ip_protocol = 6; + else + return -EINVAL; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } + + switch (nfilter->dst_ip_mask) { + case UINT32_MAX: + bfilter->dst_ipaddr_mask[0] = -1; + bfilter->dst_ipaddr[0] = nfilter->dst_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); + return -EINVAL; + } + + switch (nfilter->src_ip_mask) { + case UINT32_MAX: + bfilter->src_ipaddr_mask[0] = -1; + bfilter->src_ipaddr[0] = nfilter->src_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + switch (nfilter->src_port_mask) { + case UINT16_MAX: + bfilter->src_port_mask = -1; + bfilter->src_port = nfilter->src_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_port mask."); + return -EINVAL; + } + + //TODO Priority + //nfilter->priority = (uint8_t)filter->priority; + + bfilter->enables = en; + return 0; +} + +static struct bnxt_filter_info* 
+bnxt_match_ntuple_filter(struct bnxt *bp, + struct bnxt_filter_info *bfilter, + struct bnxt_vnic_info **mvnic) +{ + struct bnxt_filter_info *mfilter = NULL; + int i; + + for (i = bp->nr_vnics - 1; i >= 0; i--) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + STAILQ_FOREACH(mfilter, &vnic->filter, next) { + if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] && + bfilter->src_ipaddr_mask[0] == + mfilter->src_ipaddr_mask[0] && + bfilter->src_port == mfilter->src_port && + bfilter->src_port_mask == mfilter->src_port_mask && + bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] && + bfilter->dst_ipaddr_mask[0] == + mfilter->dst_ipaddr_mask[0] && + bfilter->dst_port == mfilter->dst_port && + bfilter->dst_port_mask == mfilter->dst_port_mask && + bfilter->flags == mfilter->flags && + bfilter->enables == mfilter->enables) { + if (mvnic) + *mvnic = vnic; + return mfilter; + } + } + } + return NULL; +} + +static int +bnxt_cfg_ntuple_filter(struct bnxt *bp, + struct rte_eth_ntuple_filter *nfilter, + enum rte_filter_op filter_op) +{ + struct bnxt_filter_info *bfilter, *mfilter, *filter1; + struct bnxt_vnic_info *vnic, *vnic0, *mvnic; + int ret; + + if (nfilter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); + return -EINVAL; + } + + if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { + PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n"); + return -EINVAL; + } + + bfilter = bnxt_get_unused_filter(bp); + if (bfilter == NULL) { + PMD_DRV_LOG(ERR, + "Not enough resources for a new filter.\n"); + return -ENOMEM; + } + ret = parse_ntuple_filter(bp, nfilter, bfilter); + if (ret < 0) + goto free_filter; + + vnic = &bp->vnic_info[nfilter->queue]; + vnic0 = &bp->vnic_info[0]; + filter1 = STAILQ_FIRST(&vnic0->filter); + if (filter1 == NULL) { + ret = -EINVAL; + goto free_filter; + } + + bfilter->dst_id = vnic->fw_vnic_id; + bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; + bfilter->enables |= + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + bfilter->ethertype = 0x800; + bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + + mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic); + + if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && + bfilter->dst_id == mfilter->dst_id) { + PMD_DRV_LOG(ERR, "filter exists.\n"); + ret = -EEXIST; + goto free_filter; + } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && + bfilter->dst_id != mfilter->dst_id) { + mfilter->dst_id = vnic->fw_vnic_id; + ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter); + STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next); + STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next); + PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n"); + PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n"); + goto free_filter; + } + if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + ret = -ENOENT; + goto free_filter; + } + + if (filter_op == RTE_ETH_FILTER_ADD) { + bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; + ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); + if (ret) + goto free_filter; + STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); + } else { + if (mfilter == NULL) { + /* This should not happen. But for Coverity! 
*/ + ret = -ENOENT; + goto free_filter; + } + ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter); + + STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next); + bnxt_free_filter(bp, mfilter); + mfilter->fw_l2_filter_id = -1; + bnxt_free_filter(bp, bfilter); + bfilter->fw_l2_filter_id = -1; + } + + return 0; +free_filter: + bfilter->fw_l2_filter_id = -1; + bnxt_free_filter(bp, bfilter); + return ret; +} + +static int +bnxt_ntuple_filter(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct bnxt *bp = dev->data->dev_private; + int ret; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = bnxt_cfg_ntuple_filter(bp, + (struct rte_eth_ntuple_filter *)arg, + filter_op); + break; + case RTE_ETH_FILTER_DELETE: + ret = bnxt_cfg_ntuple_filter(bp, + (struct rte_eth_ntuple_filter *)arg, + filter_op); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +bnxt_parse_fdir_filter(struct bnxt *bp, + struct rte_eth_fdir_filter *fdir, + struct bnxt_filter_info *filter) +{ + enum rte_fdir_mode fdir_mode = + bp->eth_dev->data->dev_conf.fdir_conf.mode; + struct bnxt_vnic_info *vnic0, *vnic; + struct bnxt_filter_info *filter1; + uint32_t en = 0; + int i; + + if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL) + return -EINVAL; + + filter->l2_ovlan = fdir->input.flow_ext.vlan_tci; + en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; + + switch (fdir->input.flow_type) { + case RTE_ETH_FLOW_IPV4: + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + /* FALLTHROUGH */ + filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; + filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + filter->ip_protocol = fdir->input.flow.ip4_flow.proto; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + filter->ip_addr_type = + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + filter->src_ipaddr_mask[0] = 0xffffffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + filter->dst_ipaddr_mask[0] = 0xffffffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + filter->ethertype = 0x800; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + filter->src_port = fdir->input.flow.tcp4_flow.src_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; + filter->dst_port = fdir->input.flow.tcp4_flow.dst_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + filter->dst_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + filter->src_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; + filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + filter->ip_protocol = 6; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + filter->ip_addr_type = + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + filter->src_ipaddr_mask[0] = 0xffffffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + filter->dst_ipaddr_mask[0] = 0xffffffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + filter->ethertype = 0x800; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + filter->src_port = 
fdir->input.flow.udp4_flow.src_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; + filter->dst_port = fdir->input.flow.udp4_flow.dst_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + filter->dst_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + filter->src_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; + filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + filter->ip_protocol = 17; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + filter->ip_addr_type = + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + filter->src_ipaddr_mask[0] = 0xffffffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + filter->dst_ipaddr_mask[0] = 0xffffffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + filter->ethertype = 0x800; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_IPV6: + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + /* FALLTHROUGH */ + filter->ip_addr_type = + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; + filter->ip_protocol = fdir->input.flow.ipv6_flow.proto; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + rte_memcpy(filter->src_ipaddr, + fdir->input.flow.ipv6_flow.src_ip, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; + rte_memcpy(filter->dst_ipaddr, + fdir->input.flow.ipv6_flow.dst_ip, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + memset(filter->dst_ipaddr_mask, 0xff, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + memset(filter->src_ipaddr_mask, 0xff, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + filter->ethertype = 0x86dd; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + filter->src_port = fdir->input.flow.tcp6_flow.src_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; + filter->dst_port = fdir->input.flow.tcp6_flow.dst_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + filter->dst_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + filter->src_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + filter->ip_addr_type = + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; + filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + rte_memcpy(filter->src_ipaddr, + fdir->input.flow.tcp6_flow.ip.src_ip, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; + rte_memcpy(filter->dst_ipaddr, + fdir->input.flow.tcp6_flow.ip.dst_ip, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + memset(filter->dst_ipaddr_mask, 0xff, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + memset(filter->src_ipaddr_mask, 0xff, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + filter->ethertype = 0x86dd; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + filter->src_port = fdir->input.flow.udp6_flow.src_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; + filter->dst_port = fdir->input.flow.udp6_flow.dst_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + filter->dst_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + filter->src_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + filter->ip_addr_type = + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; + filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + rte_memcpy(filter->src_ipaddr, + 
fdir->input.flow.udp6_flow.ip.src_ip, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; + rte_memcpy(filter->dst_ipaddr, + fdir->input.flow.udp6_flow.ip.dst_ip, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + memset(filter->dst_ipaddr_mask, 0xff, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + memset(filter->src_ipaddr_mask, 0xff, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + filter->ethertype = 0x86dd; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_L2_PAYLOAD: + filter->ethertype = fdir->input.flow.l2_flow.ether_type; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_VXLAN: + if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) + return -EINVAL; + filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; + break; + case RTE_ETH_FLOW_NVGRE: + if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) + return -EINVAL; + filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; + en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; + break; + case RTE_ETH_FLOW_UNKNOWN: + case RTE_ETH_FLOW_RAW: + case RTE_ETH_FLOW_FRAG_IPV4: + case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: + case RTE_ETH_FLOW_FRAG_IPV6: + case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: + case RTE_ETH_FLOW_IPV6_EX: + case RTE_ETH_FLOW_IPV6_TCP_EX: + case RTE_ETH_FLOW_IPV6_UDP_EX: + case RTE_ETH_FLOW_GENEVE: + /* FALLTHROUGH */ + default: + return -EINVAL; + } + + vnic0 = &bp->vnic_info[0]; + vnic = &bp->vnic_info[fdir->action.rx_queue]; + if (vnic == NULL) { + PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue); + return -EINVAL; + } + + + if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + rte_memcpy(filter->dst_macaddr, + fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; + } + + if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) { + filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; + filter1 = STAILQ_FIRST(&vnic0->filter); + //filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + } else { + filter->dst_id = vnic->fw_vnic_id; + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) + if (filter->dst_macaddr[i] == 0x00) + filter1 = STAILQ_FIRST(&vnic0->filter); + else + filter1 = bnxt_get_l2_filter(bp, filter, vnic); + } + + if (filter1 == NULL) + return -EINVAL; + + en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + + filter->enables = en; + + return 0; +} + +static struct bnxt_filter_info * +bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf, + struct bnxt_vnic_info **mvnic) +{ + struct bnxt_filter_info *mf = NULL; + int i; + + for (i = bp->nr_vnics - 1; i >= 0; i--) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + STAILQ_FOREACH(mf, &vnic->filter, next) { + if (mf->filter_type == nf->filter_type && + mf->flags == nf->flags && + mf->src_port == nf->src_port && + mf->src_port_mask == nf->src_port_mask && + mf->dst_port == nf->dst_port && + mf->dst_port_mask == nf->dst_port_mask && + mf->ip_protocol == nf->ip_protocol && + mf->ip_addr_type == nf->ip_addr_type && + mf->ethertype == nf->ethertype && + mf->vni == nf->vni && + mf->tunnel_type == nf->tunnel_type && + mf->l2_ovlan == nf->l2_ovlan && + mf->l2_ovlan_mask == nf->l2_ovlan_mask && + mf->l2_ivlan == nf->l2_ivlan && + mf->l2_ivlan_mask == nf->l2_ivlan_mask 
&& + !memcmp(mf->l2_addr, nf->l2_addr, + RTE_ETHER_ADDR_LEN) && + !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, + RTE_ETHER_ADDR_LEN) && + !memcmp(mf->src_macaddr, nf->src_macaddr, + RTE_ETHER_ADDR_LEN) && + !memcmp(mf->dst_macaddr, nf->dst_macaddr, + RTE_ETHER_ADDR_LEN) && + !memcmp(mf->src_ipaddr, nf->src_ipaddr, + sizeof(nf->src_ipaddr)) && + !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, + sizeof(nf->src_ipaddr_mask)) && + !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, + sizeof(nf->dst_ipaddr)) && + !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, + sizeof(nf->dst_ipaddr_mask))) { + if (mvnic) + *mvnic = vnic; + return mf; + } + } + } + return NULL; +} + +static int +bnxt_fdir_filter(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct bnxt *bp = dev->data->dev_private; + struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; + struct bnxt_filter_info *filter, *match; + struct bnxt_vnic_info *vnic, *mvnic; + int ret = 0, i; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) + return -EINVAL; + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + case RTE_ETH_FILTER_DELETE: + /* FALLTHROUGH */ + filter = bnxt_get_unused_filter(bp); + if (filter == NULL) { + PMD_DRV_LOG(ERR, + "Not enough resources for a new flow.\n"); + return -ENOMEM; + } + + ret = bnxt_parse_fdir_filter(bp, fdir, filter); + if (ret != 0) + goto free_filter; + filter->filter_type = HWRM_CFA_NTUPLE_FILTER; + + if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) + vnic = &bp->vnic_info[0]; + else + vnic = &bp->vnic_info[fdir->action.rx_queue]; + + match = bnxt_match_fdir(bp, filter, &mvnic); + if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) { + if (match->dst_id == vnic->fw_vnic_id) { + PMD_DRV_LOG(ERR, "Flow already exists.\n"); + ret = -EEXIST; + goto free_filter; + } else { + match->dst_id = vnic->fw_vnic_id; + ret = bnxt_hwrm_set_ntuple_filter(bp, + match->dst_id, + match); + STAILQ_REMOVE(&mvnic->filter, match, + bnxt_filter_info, next); + STAILQ_INSERT_TAIL(&vnic->filter, match, next); + PMD_DRV_LOG(ERR, + "Filter with matching pattern exist\n"); + PMD_DRV_LOG(ERR, + "Updated it to new destination q\n"); + goto free_filter; + } + } + if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) { + PMD_DRV_LOG(ERR, "Flow does not exist.\n"); + ret = -ENOENT; + goto free_filter; + } + + if (filter_op == RTE_ETH_FILTER_ADD) { + ret = bnxt_hwrm_set_ntuple_filter(bp, + filter->dst_id, + filter); + if (ret) + goto free_filter; + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + } else { + ret = bnxt_hwrm_clear_ntuple_filter(bp, match); + STAILQ_REMOVE(&vnic->filter, match, + bnxt_filter_info, next); + bnxt_free_filter(bp, match); + filter->fw_l2_filter_id = -1; + bnxt_free_filter(bp, filter); + } + break; + case RTE_ETH_FILTER_FLUSH: + for (i = bp->nr_vnics - 1; i >= 0; i--) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + STAILQ_FOREACH(filter, &vnic->filter, next) { + if (filter->filter_type == + HWRM_CFA_NTUPLE_FILTER) { + ret = + bnxt_hwrm_clear_ntuple_filter(bp, + filter); + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + } + } + } + return ret; + case RTE_ETH_FILTER_UPDATE: + case RTE_ETH_FILTER_STATS: + case RTE_ETH_FILTER_INFO: + PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); + ret = -EINVAL; + break; + } + return ret; + +free_filter: + filter->fw_l2_filter_id = -1; + bnxt_free_filter(bp, filter); + 
	return ret;
+}
+
+static int
+bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
+		    enum rte_filter_type filter_type,
+		    enum rte_filter_op filter_op, void *arg)
+{
+	int ret = 0;
+
+	ret = is_bnxt_in_error(dev->data->dev_private);
+	if (ret)
+		return ret;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_TUNNEL:
+		PMD_DRV_LOG(ERR,
+			    "filter type: %d: To be implemented\n", filter_type);
+		break;
+	case RTE_ETH_FILTER_FDIR:
+		ret = bnxt_fdir_filter(dev, filter_op, arg);
+		break;
+	case RTE_ETH_FILTER_NTUPLE:
+		ret = bnxt_ntuple_filter(dev, filter_op, arg);
+		break;
+	case RTE_ETH_FILTER_ETHERTYPE:
+		ret = bnxt_ethertype_filter(dev, filter_op, arg);
+		break;
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &bnxt_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(ERR,
+			    "Filter type (%d) not supported", filter_type);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static const uint32_t *
+bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
+{
+	static const uint32_t ptypes[] = {
+		RTE_PTYPE_L2_ETHER_VLAN,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_L4_ICMP,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_INNER_L4_ICMP,
+		RTE_PTYPE_INNER_L4_TCP,
+		RTE_PTYPE_INNER_L4_UDP,
+		RTE_PTYPE_UNKNOWN
+	};
+
+	if (!dev->rx_pkt_burst)
+		return NULL;
+
+	return ptypes;
+}
+
+static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
+			 int reg_win)
+{
+	uint32_t reg_base = *reg_arr & 0xfffff000;
+	uint32_t win_off;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		if ((reg_arr[i] & 0xfffff000) != reg_base)
+			return -ERANGE;
+	}
+	win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
+	rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
+	return 0;
+}
+
+static int bnxt_map_ptp_regs(struct bnxt *bp)
+{
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	uint32_t *reg_arr;
+	int rc, i;
+
+	reg_arr = ptp->rx_regs;
+	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
+	if (rc)
+		return rc;
+
+	reg_arr = ptp->tx_regs;
+	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < BNXT_PTP_RX_REGS; i++)
+		ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
+
+	for (i = 0; i < BNXT_PTP_TX_REGS; i++)
+		ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
+
+	return 0;
+}
+
+static void bnxt_unmap_ptp_regs(struct bnxt *bp)
+{
+	rte_write32(0, (uint8_t *)bp->bar0 +
+		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
+	rte_write32(0, (uint8_t *)bp->bar0 +
+		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
+}
+
+static uint64_t bnxt_cc_read(struct bnxt *bp)
+{
+	uint64_t ns;
+
+	ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+			      BNXT_GRCPF_REG_SYNC_TIME));
+	ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+					  BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
+	return ns;
+}
+
+static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
+{
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	uint32_t fifo;
+
+	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
+	if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
+		return -EAGAIN;
+
+	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
+	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+			       ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
+	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
+
+	return 0;
+}
+
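bnxt_get_tx_ts() above and bnxt_get_rx_ts() below drain the PTP timestamp FIFOs for the IEEE 1588 ops registered further down. An application consumes them through the generic timesync API; a sketch (port_id is illustrative, and the retry policy on -EAGAIN is the caller's choice):

	uint16_t port_id = 0;			/* illustrative */
	struct timespec ts;

	rte_eth_timesync_enable(port_id);	/* bnxt_timesync_enable() */
	/* ... after sending a PTP frame flagged PKT_TX_IEEE1588_TMST ... */
	while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == -EAGAIN)
		rte_pause();			/* FIFO still empty */
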
+static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
+{
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	struct bnxt_pf_info *pf = &bp->pf;
+	uint16_t port_id;
+	uint32_t fifo;
+
+	if (!ptp)
+		return -ENODEV;
+
+	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+				ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
+	if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
+		return -EAGAIN;
+
+	port_id = pf->port_id;
+	rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
+		    ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
+
+	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+				ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
+	if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
+/*		bnxt_clr_rx_ts(bp);	TBD  */
+		return -EBUSY;
+	}
+
+	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+			       ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
+	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
+
+	return 0;
+}
+
+static int
+bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+	uint64_t ns;
+	struct bnxt *bp = dev->data->dev_private;
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+	if (!ptp)
+		return 0;
+
+	ns = rte_timespec_to_ns(ts);
+	/* Set the timecounters to a new value. */
+	ptp->tc.nsec = ns;
+
+	return 0;
+}
+
+static int
+bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	struct bnxt *bp = dev->data->dev_private;
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	uint64_t ns, systime_cycles = 0;
+	int rc = 0;
+
+	if (!ptp)
+		return 0;
+
+	if (BNXT_CHIP_THOR(bp))
+		rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
+					     &systime_cycles);
+	else
+		systime_cycles = bnxt_cc_read(bp);
+
+	ns = rte_timecounter_update(&ptp->tc, systime_cycles);
+	*ts = rte_ns_to_timespec(ns);
+
+	return rc;
+}
+static int
+bnxt_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct bnxt *bp = dev->data->dev_private;
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	uint32_t shift = 0;
+	int rc;
+
+	if (!ptp)
+		return 0;
+
+	ptp->rx_filter = 1;
+	ptp->tx_tstamp_en = 1;
+	ptp->rxctl = BNXT_PTP_MSG_EVENTS;
+
+	rc = bnxt_hwrm_ptp_cfg(bp);
+	if (rc)
+		return rc;
+
+	memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
+	memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+	ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
+	ptp->tc.cc_shift = shift;
+	ptp->tc.nsec_mask = (1ULL << shift) - 1;
+
+	ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
+	ptp->rx_tstamp_tc.cc_shift = shift;
+	ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+	ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
+	ptp->tx_tstamp_tc.cc_shift = shift;
+	ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+	if (!BNXT_CHIP_THOR(bp))
+		bnxt_map_ptp_regs(bp);
+
+	return 0;
+}
+
+static int
+bnxt_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct bnxt *bp = dev->data->dev_private;
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+	if (!ptp)
+		return 0;
+
+	ptp->rx_filter = 0;
+	ptp->tx_tstamp_en = 0;
+	ptp->rxctl = 0;
+
+	bnxt_hwrm_ptp_cfg(bp);
+
+	if (!BNXT_CHIP_THOR(bp))
+		bnxt_unmap_ptp_regs(bp);
+
+	return 0;
+}
+
+static int
+bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+				struct timespec *timestamp,
+				uint32_t flags __rte_unused)
+{
+	struct bnxt *bp = dev->data->dev_private;
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	uint64_t rx_tstamp_cycles = 0;
+	uint64_t ns;
+
+	if (!ptp)
+		return 0;
+
+	if (BNXT_CHIP_THOR(bp))
+		rx_tstamp_cycles = ptp->rx_timestamp;
+	else
+		bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
+ + ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + return 0; +} + +static int +bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint64_t tx_tstamp_cycles = 0; + uint64_t ns; + int rc = 0; + + if (!ptp) + return 0; + + if (BNXT_CHIP_THOR(bp)) + rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, + &tx_tstamp_cycles); + else + rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); + + ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return rc; +} + +static int +bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (!ptp) + return 0; + + ptp->tc.nsec += delta; + + return 0; +} + +static int +bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) +{ + struct bnxt *bp = dev->data->dev_private; + int rc; + uint32_t dir_entries; + uint32_t entry_length; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n", + bp->pdev->addr.domain, bp->pdev->addr.bus, + bp->pdev->addr.devid, bp->pdev->addr.function); + + rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); + if (rc != 0) + return rc; + + return dir_entries * entry_length; +} + +static int +bnxt_get_eeprom_op(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct bnxt *bp = dev->data->dev_private; + uint32_t index; + uint32_t offset; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d " + "len = %d\n", bp->pdev->addr.domain, + bp->pdev->addr.bus, bp->pdev->addr.devid, + bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); + + if (in_eeprom->offset == 0) /* special offset value to get directory */ + return bnxt_get_nvram_directory(bp, in_eeprom->length, + in_eeprom->data); + + index = in_eeprom->offset >> 24; + offset = in_eeprom->offset & 0xffffff; + + if (index != 0) + return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, + in_eeprom->length, in_eeprom->data); + + return 0; +} + +static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) +{ + switch (dir_type) { + case BNX_DIR_TYPE_CHIMP_PATCH: + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + case BNX_DIR_TYPE_APE_FW: + case BNX_DIR_TYPE_APE_PATCH: + case BNX_DIR_TYPE_KONG_FW: + case BNX_DIR_TYPE_KONG_PATCH: + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: + /* FALLTHROUGH */ + return true; + } + + return false; +} + +static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) +{ + switch (dir_type) { + case BNX_DIR_TYPE_AVS: + case BNX_DIR_TYPE_EXP_ROM_MBA: + case BNX_DIR_TYPE_PCIE: + case BNX_DIR_TYPE_TSCF_UCODE: + case BNX_DIR_TYPE_EXT_PHY: + case BNX_DIR_TYPE_CCM: + case BNX_DIR_TYPE_ISCSI_BOOT: + case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: + case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: + /* FALLTHROUGH */ + return true; + } + + return false; +} + +static bool bnxt_dir_type_is_executable(uint16_t dir_type) +{ + return bnxt_dir_type_is_ape_bin_format(dir_type) || + bnxt_dir_type_is_other_exec_format(dir_type); +} + +static int +bnxt_set_eeprom_op(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct bnxt *bp = dev->data->dev_private; + uint8_t index, dir_op; + uint16_t type, ext, ordinal, attr; + int rc; + + rc = is_bnxt_in_error(bp); + if 
(rc) + return rc; + + PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d " + "len = %d\n", bp->pdev->addr.domain, + bp->pdev->addr.bus, bp->pdev->addr.devid, + bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); + return -EINVAL; + } + + type = in_eeprom->magic >> 16; + + if (type == 0xffff) { /* special value for directory operations */ + index = in_eeprom->magic & 0xff; + dir_op = in_eeprom->magic >> 8; + if (index == 0) + return -EINVAL; + switch (dir_op) { + case 0x0e: /* erase */ + if (in_eeprom->offset != ~in_eeprom->magic) + return -EINVAL; + return bnxt_hwrm_erase_nvram_directory(bp, index - 1); + default: + return -EINVAL; + } + } + + /* Create or re-write an NVM item: */ + if (bnxt_dir_type_is_executable(type) == true) + return -EOPNOTSUPP; + ext = in_eeprom->magic & 0xffff; + ordinal = in_eeprom->offset >> 16; + attr = in_eeprom->offset & 0xffff; + + return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, + in_eeprom->data, in_eeprom->length); +} + +/* + * Initialization + */ + +static const struct eth_dev_ops bnxt_dev_ops = { + .dev_infos_get = bnxt_dev_info_get_op, + .dev_close = bnxt_dev_close_op, + .dev_configure = bnxt_dev_configure_op, + .dev_start = bnxt_dev_start_op, + .dev_stop = bnxt_dev_stop_op, + .dev_set_link_up = bnxt_dev_set_link_up_op, + .dev_set_link_down = bnxt_dev_set_link_down_op, + .stats_get = bnxt_stats_get_op, + .stats_reset = bnxt_stats_reset_op, + .rx_queue_setup = bnxt_rx_queue_setup_op, + .rx_queue_release = bnxt_rx_queue_release_op, + .tx_queue_setup = bnxt_tx_queue_setup_op, + .tx_queue_release = bnxt_tx_queue_release_op, + .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, + .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, + .reta_update = bnxt_reta_update_op, + .reta_query = bnxt_reta_query_op, + .rss_hash_update = bnxt_rss_hash_update_op, + .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, + .link_update = bnxt_link_update_op, + .promiscuous_enable = bnxt_promiscuous_enable_op, + .promiscuous_disable = bnxt_promiscuous_disable_op, + .allmulticast_enable = bnxt_allmulticast_enable_op, + .allmulticast_disable = bnxt_allmulticast_disable_op, + .mac_addr_add = bnxt_mac_addr_add_op, + .mac_addr_remove = bnxt_mac_addr_remove_op, + .flow_ctrl_get = bnxt_flow_ctrl_get_op, + .flow_ctrl_set = bnxt_flow_ctrl_set_op, + .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, + .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, + .vlan_filter_set = bnxt_vlan_filter_set_op, + .vlan_offload_set = bnxt_vlan_offload_set_op, + .vlan_tpid_set = bnxt_vlan_tpid_set_op, + .vlan_pvid_set = bnxt_vlan_pvid_set_op, + .mtu_set = bnxt_mtu_set_op, + .mac_addr_set = bnxt_set_default_mac_addr_op, + .xstats_get = bnxt_dev_xstats_get_op, + .xstats_get_names = bnxt_dev_xstats_get_names_op, + .xstats_reset = bnxt_dev_xstats_reset_op, + .fw_version_get = bnxt_fw_version_get, + .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, + .rxq_info_get = bnxt_rxq_info_get_op, + .txq_info_get = bnxt_txq_info_get_op, + .dev_led_on = bnxt_dev_led_on_op, + .dev_led_off = bnxt_dev_led_off_op, + .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op, + .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op, + .rx_queue_count = bnxt_rx_queue_count_op, + .rx_descriptor_status = bnxt_rx_descriptor_status_op, + .tx_descriptor_status = bnxt_tx_descriptor_status_op, + .rx_queue_start = bnxt_rx_queue_start, + .rx_queue_stop = bnxt_rx_queue_stop, + .tx_queue_start = bnxt_tx_queue_start, + 
.tx_queue_stop = bnxt_tx_queue_stop, + .filter_ctrl = bnxt_filter_ctrl_op, + .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, + .get_eeprom_length = bnxt_get_eeprom_length_op, + .get_eeprom = bnxt_get_eeprom_op, + .set_eeprom = bnxt_set_eeprom_op, + .timesync_enable = bnxt_timesync_enable, + .timesync_disable = bnxt_timesync_disable, + .timesync_read_time = bnxt_timesync_read_time, + .timesync_write_time = bnxt_timesync_write_time, + .timesync_adjust_time = bnxt_timesync_adjust_time, + .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, + .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, +}; + +static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) +{ + uint32_t offset; + + /* Only pre-map the reset GRC registers using window 3 */ + rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); + + offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); + + return offset; +} + +int bnxt_map_fw_health_status_regs(struct bnxt *bp) +{ + struct bnxt_error_recovery_info *info = bp->recovery_info; + uint32_t reg_base = 0xffffffff; + int i; + + /* Only pre-map the monitoring GRC registers using window 2 */ + for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { + uint32_t reg = info->status_regs[i]; + + if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) + continue; + + if (reg_base == 0xffffffff) + reg_base = reg & 0xfffff000; + if ((reg & 0xfffff000) != reg_base) + return -ERANGE; + + /* Use mask 0xffc as the Lower 2 bits indicates + * address space location + */ + info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + + (reg & 0xffc); + } + + if (reg_base == 0xffffffff) + return 0; + + rte_write32(reg_base, (uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); + + return 0; +} + +static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) +{ + struct bnxt_error_recovery_info *info = bp->recovery_info; + uint32_t delay = info->delay_after_reset[index]; + uint32_t val = info->reset_reg_val[index]; + uint32_t reg = info->reset_reg[index]; + uint32_t type, offset; + + type = BNXT_FW_STATUS_REG_TYPE(reg); + offset = BNXT_FW_STATUS_REG_OFF(reg); + + switch (type) { + case BNXT_FW_STATUS_REG_TYPE_CFG: + rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); + break; + case BNXT_FW_STATUS_REG_TYPE_GRC: + offset = bnxt_map_reset_regs(bp, offset); + rte_write32(val, (uint8_t *)bp->bar0 + offset); + break; + case BNXT_FW_STATUS_REG_TYPE_BAR0: + rte_write32(val, (uint8_t *)bp->bar0 + offset); + break; + } + /* wait on a specific interval of time until core reset is complete */ + if (delay) + rte_delay_ms(delay); +} + +static void bnxt_dev_cleanup(struct bnxt *bp) +{ + bnxt_set_hwrm_link_config(bp, false); + bp->link_info.link_up = 0; + if (bp->dev_stopped == 0) + bnxt_dev_stop_op(bp->eth_dev); + + bnxt_uninit_resources(bp, true); +} + +static int bnxt_restore_filters(struct bnxt *bp) +{ + struct rte_eth_dev *dev = bp->eth_dev; + int ret = 0; + + if (dev->data->all_multicast) + ret = bnxt_allmulticast_enable_op(dev); + if (dev->data->promiscuous) + ret = bnxt_promiscuous_enable_op(dev); + + /* TODO restore other filters as well */ + return ret; +} + +static void bnxt_dev_recover(void *arg) +{ + struct bnxt *bp = arg; + int timeout = bp->fw_reset_max_msecs; + int rc = 0; + + /* Clear Error flag so that device re-init should happen */ + bp->flags &= ~BNXT_FLAG_FATAL_ERROR; + + do { + rc = bnxt_hwrm_ver_get(bp); + if (rc == 0) + break; + rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); + timeout -= 
			BNXT_FW_READY_WAIT_INTERVAL;
+	} while (rc && timeout);
+
+	if (rc) {
+		PMD_DRV_LOG(ERR, "FW is not Ready after reset\n");
+		goto err;
+	}
+
+	rc = bnxt_init_resources(bp, true);
+	if (rc) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to initialize resources after reset\n");
+		goto err;
+	}
+	/* clear reset flag as the device is initialized now */
+	bp->flags &= ~BNXT_FLAG_FW_RESET;
+
+	rc = bnxt_dev_start_op(bp->eth_dev);
+	if (rc) {
+		PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
+		goto err;
+	}
+
+	rc = bnxt_restore_filters(bp);
+	if (rc)
+		goto err;
+
+	PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
+	return;
+err:
+	bp->flags |= BNXT_FLAG_FATAL_ERROR;
+	bnxt_uninit_resources(bp, false);
+	PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
+}
+
+void bnxt_dev_reset_and_resume(void *arg)
+{
+	struct bnxt *bp = arg;
+	int rc;
+
+	bnxt_dev_cleanup(bp);
+
+	bnxt_wait_for_device_shutdown(bp);
+
+	rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs,
+			       bnxt_dev_recover, (void *)bp);
+	if (rc)
+		PMD_DRV_LOG(ERR, "Error setting recovery alarm");
+}
+
+uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
+{
+	struct bnxt_error_recovery_info *info = bp->recovery_info;
+	uint32_t reg = info->status_regs[index];
+	uint32_t type, offset, val = 0;
+
+	type = BNXT_FW_STATUS_REG_TYPE(reg);
+	offset = BNXT_FW_STATUS_REG_OFF(reg);
+
+	switch (type) {
+	case BNXT_FW_STATUS_REG_TYPE_CFG:
+		rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
+		break;
+	case BNXT_FW_STATUS_REG_TYPE_GRC:
+		offset = info->mapped_status_regs[index];
+		/* FALLTHROUGH */
+	case BNXT_FW_STATUS_REG_TYPE_BAR0:
+		val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+						  offset));
+		break;
+	}
+
+	return val;
+}
+
+static int bnxt_fw_reset_all(struct bnxt *bp)
+{
+	struct bnxt_error_recovery_info *info = bp->recovery_info;
+	uint32_t i;
+	int rc = 0;
+
+	if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
+		/* Reset through master function driver */
+		for (i = 0; i < info->reg_array_cnt; i++)
+			bnxt_write_fw_reset_reg(bp, i);
+		/* Wait for time specified by FW after triggering reset */
+		rte_delay_ms(info->master_func_wait_period_after_reset);
+	} else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) {
+		/* Reset with the help of Kong processor */
+		rc = bnxt_hwrm_fw_reset(bp);
+		if (rc)
+			PMD_DRV_LOG(ERR, "Failed to reset FW\n");
+	}
+
+	return rc;
+}
+
+static void bnxt_fw_reset_cb(void *arg)
+{
+	struct bnxt *bp = arg;
+	struct bnxt_error_recovery_info *info = bp->recovery_info;
+	int rc = 0;
+
+	/* Only Master function can do FW reset */
+	if (bnxt_is_master_func(bp) &&
+	    bnxt_is_recovery_enabled(bp)) {
+		rc = bnxt_fw_reset_all(bp);
+		if (rc) {
+			PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
+			return;
+		}
+	}
+
+	/* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send
+	 * EXCEPTION_FATAL_ASYNC event to all the functions
+	 * (including MASTER FUNC). After receiving this Async, all the active
+	 * drivers should treat this case as FW initiated recovery
+	 */
+	if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
+		bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT;
+		bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT;
+
+		/* To recover from error */
+		rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
+				  (void *)bp);
+	}
+}
+
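The recovery path above is driven entirely by EAL alarms rather than a dedicated thread: each callback re-arms itself, as bnxt_check_fw_health() does below. The underlying pattern, reduced to a sketch with a hypothetical callback name (rte_eal_alarm_set()/rte_eal_alarm_cancel() are the real <rte_alarm.h> API; the 1000 ms period is arbitrary):

	static void poll_cb(void *arg)		/* hypothetical name */
	{
		/* ... sample counters, decide whether to escalate ... */

		/* EAL alarms are one-shot; re-arm to keep polling */
		rte_eal_alarm_set(US_PER_MS * 1000, poll_cb, arg);
	}

	/* arm once; stop later with rte_eal_alarm_cancel(poll_cb, arg) */
	rte_eal_alarm_set(US_PER_MS * 1000, poll_cb, arg);
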
+static void bnxt_check_fw_health(void *arg)
+{
+	struct bnxt *bp = arg;
+	struct bnxt_error_recovery_info *info = bp->recovery_info;
+	uint32_t val = 0, wait_msec;
+
+	if (!info || !bnxt_is_recovery_enabled(bp) ||
+	    is_bnxt_in_error(bp))
+		return;
+
+	val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
+	if (val == info->last_heart_beat)
+		goto reset;
+
+	info->last_heart_beat = val;
+
+	val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
+	if (val != info->last_reset_counter)
+		goto reset;
+
+	info->last_reset_counter = val;
+
+	rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq,
+			  bnxt_check_fw_health, (void *)bp);
+
+	return;
+reset:
+	/* Stop DMA to/from device */
+	bp->flags |= BNXT_FLAG_FATAL_ERROR;
+	bp->flags |= BNXT_FLAG_FW_RESET;
+
+	PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
+
+	if (bnxt_is_master_func(bp))
+		wait_msec = info->master_func_wait_period;
+	else
+		wait_msec = info->normal_func_wait_period;
+
+	rte_eal_alarm_set(US_PER_MS * wait_msec,
+			  bnxt_fw_reset_cb, (void *)bp);
+}
+
+void bnxt_schedule_fw_health_check(struct bnxt *bp)
+{
+	uint32_t polling_freq;
+
+	if (!bnxt_is_recovery_enabled(bp))
+		return;
+
+	if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
+		return;
+
+	polling_freq = bp->recovery_info->driver_polling_freq;
+
+	rte_eal_alarm_set(US_PER_MS * polling_freq,
+			  bnxt_check_fw_health, (void *)bp);
+	bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
+}
+
+static void bnxt_cancel_fw_health_check(struct bnxt *bp)
+{
+	if (!bnxt_is_recovery_enabled(bp))
+		return;
+
+	rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
+	bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
+}
+
+static bool bnxt_vf_pciid(uint16_t id)
+{
+	if (id == BROADCOM_DEV_ID_57304_VF ||
+	    id == BROADCOM_DEV_ID_57406_VF ||
+	    id == BROADCOM_DEV_ID_5731X_VF ||
+	    id == BROADCOM_DEV_ID_5741X_VF ||
+	    id == BROADCOM_DEV_ID_57414_VF ||
+	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||
+	    id == BROADCOM_DEV_ID_58802_VF ||
+	    id == BROADCOM_DEV_ID_57500_VF1 ||
+	    id == BROADCOM_DEV_ID_57500_VF2)
+		return true;
+	return false;
+}
+
+bool bnxt_stratus_device(struct bnxt *bp)
+{
+	uint16_t id = bp->pdev->id.device_id;
+
+	if (id == BROADCOM_DEV_ID_STRATUS_NIC ||
+	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2)
+		return true;
+	return false;
+}
+static int bnxt_init_board(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct bnxt *bp = eth_dev->data->dev_private;
+
+	/* Enable device (incl. PCI PM wakeup) and bus-mastering */
+	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
+	bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
+	if (!bp->bar0 || !bp->doorbell_base) {
+		PMD_DRV_LOG(ERR, "Unable to access the hardware\n");
+		return -ENODEV;
+	}
+
+	bp->eth_dev = eth_dev;
+	bp->pdev = pci_dev;
+
+	return 0;
+}
+
+static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp,
+				  struct bnxt_ctx_pg_info *ctx_pg,
+				  uint32_t mem_size,
+				  const char *suffix,
+				  uint16_t idx)
+{
+	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+	const struct rte_memzone *mz = NULL;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	rte_iova_t mz_phys_addr;
+	uint64_t valid_bits = 0;
+	uint32_t sz;
+	int i;
+
+	if (!mem_size)
+		return 0;
+
+	rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
+			 BNXT_PAGE_SIZE;
+	rmem->page_size = BNXT_PAGE_SIZE;
+	rmem->pg_arr = ctx_pg->ctx_pg_arr;
+	rmem->dma_arr = ctx_pg->ctx_dma_arr;
+	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
+
+	valid_bits = PTU_PTE_VALID;
+
+	if (rmem->nr_pages > 1) {
+		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+			 "bnxt_ctx_pg_tbl%s_%x_%d",
+			 suffix, idx, bp->eth_dev->data->port_id);
+		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+		mz = rte_memzone_lookup(mz_name);
+		if (!mz) {
+			mz = rte_memzone_reserve_aligned(mz_name,
+						rmem->nr_pages * 8,
+						SOCKET_ID_ANY,
+						RTE_MEMZONE_2MB |
+						RTE_MEMZONE_SIZE_HINT_ONLY |
+						RTE_MEMZONE_IOVA_CONTIG,
+						BNXT_PAGE_SIZE);
+			if (mz == NULL)
+				return -ENOMEM;
+		}
+
+		memset(mz->addr, 0, mz->len);
+		mz_phys_addr = mz->iova;
+		if ((unsigned long)mz->addr == mz_phys_addr) {
+			PMD_DRV_LOG(DEBUG,
+				    "physical address same as virtual\n");
+			PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
+			mz_phys_addr = rte_mem_virt2iova(mz->addr);
+			if (mz_phys_addr == RTE_BAD_IOVA) {
+				PMD_DRV_LOG(ERR,
+					    "unable to map addr to phys memory\n");
+				return -ENOMEM;
+			}
+		}
+		rte_mem_lock_page(((char *)mz->addr));
+
+		rmem->pg_tbl = mz->addr;
+		rmem->pg_tbl_map = mz_phys_addr;
+		rmem->pg_tbl_mz = mz;
+	}
+
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
+		 suffix, idx, bp->eth_dev->data->port_id);
+	mz = rte_memzone_lookup(mz_name);
+	if (!mz) {
+		mz = rte_memzone_reserve_aligned(mz_name,
+						 mem_size,
+						 SOCKET_ID_ANY,
+						 RTE_MEMZONE_1GB |
+						 RTE_MEMZONE_SIZE_HINT_ONLY |
+						 RTE_MEMZONE_IOVA_CONTIG,
+						 BNXT_PAGE_SIZE);
+		if (mz == NULL)
+			return -ENOMEM;
+	}
+
+	memset(mz->addr, 0, mz->len);
+	mz_phys_addr = mz->iova;
+	if ((unsigned long)mz->addr == mz_phys_addr) {
+		PMD_DRV_LOG(DEBUG,
+			    "Memzone physical address same as virtual.\n");
+		PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
+		for (sz = 0; sz < mem_size; sz += BNXT_PAGE_SIZE)
+			rte_mem_lock_page(((char *)mz->addr) + sz);
+		mz_phys_addr = rte_mem_virt2iova(mz->addr);
+		if (mz_phys_addr == RTE_BAD_IOVA) {
+			PMD_DRV_LOG(ERR,
+				    "unable to map addr to phys memory\n");
+			return -ENOMEM;
+		}
+	}
+
+	for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
+		rte_mem_lock_page(((char *)mz->addr) + sz);
+		rmem->pg_arr[i] = ((char *)mz->addr) + sz;
+		rmem->dma_arr[i] = mz_phys_addr + sz;
+
+		if (rmem->nr_pages > 1) {
+			if (i == rmem->nr_pages - 2 &&
+			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+				valid_bits |= PTU_PTE_NEXT_TO_LAST;
+			else if (i == rmem->nr_pages - 1 &&
+				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+				valid_bits |= PTU_PTE_LAST;
+
+			rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
+							   valid_bits);
+		}
+	}
+
+	rmem->mz = mz;
+	if (rmem->vmem_size)
+		rmem->vmem = (void **)mz->addr;
+	rmem->dma_arr[0] = mz_phys_addr;
+	return 0;
+}
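/*
 * A minimal sketch (illustrative, not part of the patch) of the paging math
 * used above. A context block larger than one BNXT_PAGE_SIZE page is split
 * into pages; when more than one page is needed, a separate page table is
 * allocated whose little-endian PTEs carry the valid bits (PTU_PTE_VALID,
 * plus NEXT_TO_LAST/LAST markers on ring memory). The page count itself is
 * just a ceiling division:
 */
static inline uint32_t ctx_nr_pages(uint32_t mem_size, uint32_t page_size)
{
	/* Equivalent to RTE_ALIGN_MUL_CEIL(mem_size, page_size) / page_size */
	return (mem_size + page_size - 1) / page_size;
}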
+static void bnxt_free_ctx_mem(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
+		return;
+
+	bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+	rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
+	rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
+	rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
+	rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
+	rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
+	rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
+	rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
+	rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
+	rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
+	rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
+
+	for (i = 0; i < BNXT_MAX_Q; i++) {
+		if (bp->ctx->tqm_mem[i])
+			rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
+	}
+
+	rte_free(bp->ctx);
+	bp->ctx = NULL;
+}
+
+#define bnxt_roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
+
+#define min_t(type, x, y) ({                    \
+	type __min1 = (x);                      \
+	type __min2 = (y);                      \
+	__min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({                    \
+	type __max1 = (x);                      \
+	type __max2 = (y);                      \
+	__max1 > __max2 ? __max1 : __max2; })
+
+#define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max)
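/*
 * A quick worked example (illustrative, not part of the patch) of how the
 * helpers above compose. bnxt_roundup() rounds up to a multiple and
 * clamp_t() bounds a value to [min, max]; with hypothetical values:
 *
 *   bnxt_roundup(1000, 64)                  -> 1024
 *   clamp_t(uint32_t, 1024, 2048, 65536)    -> 2048   (raised to the floor)
 *   clamp_t(uint32_t, 100000, 2048, 65536)  -> 65536  (capped to the ceiling)
 *
 * which is exactly the sequence bnxt_alloc_ctx_mem() applies to the TQM
 * entry count below.
 */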
+int bnxt_alloc_ctx_mem(struct bnxt *bp)
+{
+	struct bnxt_ctx_pg_info *ctx_pg;
+	struct bnxt_ctx_mem_info *ctx;
+	uint32_t mem_size, ena, entries;
+	int i, rc;
+
+	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
+	if (rc) {
+		PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
+		return rc;
+	}
+	ctx = bp->ctx;
+	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
+		return 0;
+
+	ctx_pg = &ctx->qp_mem;
+	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
+	mem_size = ctx->qp_entry_size * ctx_pg->entries;
+	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
+	if (rc)
+		return rc;
+
+	ctx_pg = &ctx->srq_mem;
+	ctx_pg->entries = ctx->srq_max_l2_entries;
+	mem_size = ctx->srq_entry_size * ctx_pg->entries;
+	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
+	if (rc)
+		return rc;
+
+	ctx_pg = &ctx->cq_mem;
+	ctx_pg->entries = ctx->cq_max_l2_entries;
+	mem_size = ctx->cq_entry_size * ctx_pg->entries;
+	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
+	if (rc)
+		return rc;
+
+	ctx_pg = &ctx->vnic_mem;
+	ctx_pg->entries = ctx->vnic_max_vnic_entries +
+		ctx->vnic_max_ring_table_entries;
+	mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
+	if (rc)
+		return rc;
+
+	ctx_pg = &ctx->stat_mem;
+	ctx_pg->entries = ctx->stat_max_entries;
+	mem_size = ctx->stat_entry_size * ctx_pg->entries;
+	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
+	if (rc)
+		return rc;
+
+	entries = ctx->qp_max_l2_entries;
+	entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
+	entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
+			  ctx->tqm_max_entries_per_ring);
+	for (i = 0, ena = 0; i < BNXT_MAX_Q; i++) {
+		ctx_pg = ctx->tqm_mem[i];
+		/* use min tqm entries for now. */
+		ctx_pg->entries = entries;
+		mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
+		if (rc)
+			return rc;
+		ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
+	}
+
+	ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
+	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+	if (rc)
+		PMD_DRV_LOG(ERR,
+			    "Failed to configure context mem: rc = %d\n", rc);
+	else
+		ctx->flags |= BNXT_CTX_FLAG_INITED;
+
+	return rc;
+}
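/*
 * A minimal sketch (illustrative, not part of the patch) of the
 * lookup-before-reserve memzone idiom used throughout this file. Memzone
 * names are process-wide, so on a driver restart within the same EAL
 * instance the zone may already exist; looking it up first avoids a
 * spurious allocation failure. 'zone_name' and the size are illustrative.
 */
#include <rte_memzone.h>

static const struct rte_memzone *get_or_reserve(const char *zone_name,
						size_t len)
{
	const struct rte_memzone *mz = rte_memzone_lookup(zone_name);

	if (mz == NULL)
		mz = rte_memzone_reserve(zone_name, len, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG);
	return mz;
}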
+static int bnxt_alloc_stats_mem(struct bnxt *bp)
+{
+	struct rte_pci_device *pci_dev = bp->pdev;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *mz = NULL;
+	uint32_t total_alloc_len;
+	rte_iova_t mz_phys_addr;
+
+	if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
+		return 0;
+
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+		 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
+		 pci_dev->addr.bus, pci_dev->addr.devid,
+		 pci_dev->addr.function, "rx_port_stats");
+	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+	mz = rte_memzone_lookup(mz_name);
+	total_alloc_len =
+		RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
+				       sizeof(struct rx_port_stats_ext) + 512);
+	if (!mz) {
+		mz = rte_memzone_reserve(mz_name, total_alloc_len,
+					 SOCKET_ID_ANY,
+					 RTE_MEMZONE_2MB |
+					 RTE_MEMZONE_SIZE_HINT_ONLY |
+					 RTE_MEMZONE_IOVA_CONTIG);
+		if (mz == NULL)
+			return -ENOMEM;
+	}
+	memset(mz->addr, 0, mz->len);
+	mz_phys_addr = mz->iova;
+	if ((unsigned long)mz->addr == mz_phys_addr) {
+		PMD_DRV_LOG(DEBUG,
+			    "Memzone physical address same as virtual.\n");
+		PMD_DRV_LOG(DEBUG,
+			    "Using rte_mem_virt2iova()\n");
+		mz_phys_addr = rte_mem_virt2iova(mz->addr);
+		if (mz_phys_addr == RTE_BAD_IOVA) {
+			PMD_DRV_LOG(ERR,
+				    "Can't map address to physical memory\n");
+			return -ENOMEM;
+		}
+	}
+
+	bp->rx_mem_zone = (const void *)mz;
+	bp->hw_rx_port_stats = mz->addr;
+	bp->hw_rx_port_stats_map = mz_phys_addr;
+
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+		 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
+		 pci_dev->addr.bus, pci_dev->addr.devid,
+		 pci_dev->addr.function, "tx_port_stats");
+	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+	mz = rte_memzone_lookup(mz_name);
+	total_alloc_len =
+		RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
+				       sizeof(struct tx_port_stats_ext) + 512);
+	if (!mz) {
+		mz = rte_memzone_reserve(mz_name,
+					 total_alloc_len,
+					 SOCKET_ID_ANY,
+					 RTE_MEMZONE_2MB |
+					 RTE_MEMZONE_SIZE_HINT_ONLY |
+					 RTE_MEMZONE_IOVA_CONTIG);
+		if (mz == NULL)
+			return -ENOMEM;
+	}
+	memset(mz->addr, 0, mz->len);
+	mz_phys_addr = mz->iova;
+	if ((unsigned long)mz->addr == mz_phys_addr) {
+		PMD_DRV_LOG(DEBUG,
+			    "Memzone physical address same as virtual\n");
+		PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
+		mz_phys_addr = rte_mem_virt2iova(mz->addr);
+		if (mz_phys_addr == RTE_BAD_IOVA) {
+			PMD_DRV_LOG(ERR,
+				    "Can't map address to physical memory\n");
+			return -ENOMEM;
+		}
+	}
+
+	bp->tx_mem_zone = (const void *)mz;
+	bp->hw_tx_port_stats = mz->addr;
+	bp->hw_tx_port_stats_map = mz_phys_addr;
+	bp->flags |= BNXT_FLAG_PORT_STATS;
+
+	/* Display extended statistics if FW supports it */
+	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
+	    bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
+	    !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
+		return 0;
+
+	bp->hw_rx_port_stats_ext = (void *)
+		((uint8_t *)bp->hw_rx_port_stats +
+		 sizeof(struct rx_port_stats));
+	bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
+		sizeof(struct rx_port_stats);
+	bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
+
+	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
+	    bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
+		bp->hw_tx_port_stats_ext = (void *)
+			((uint8_t *)bp->hw_tx_port_stats +
+			 sizeof(struct tx_port_stats));
+		bp->hw_tx_port_stats_ext_map =
+			bp->hw_tx_port_stats_map +
+			sizeof(struct tx_port_stats);
+		bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
+	}
+
+	return 0;
+}
+
+static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = eth_dev->data->dev_private;
+	int rc = 0;
+
+	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
+					       RTE_ETHER_ADDR_LEN *
+					       bp->max_l2_ctx,
+					       0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
+		return -ENOMEM;
+	}
+
+	if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
+		if (BNXT_PF(bp))
+			return -EINVAL;
+
+		/* Generate a random MAC address, if none was assigned by PF */
+		PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
+		bnxt_eth_hw_addr_random(bp->mac_addr);
+		PMD_DRV_LOG(INFO,
+			    "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
+			    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
+			    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
+
+		rc = bnxt_hwrm_set_mac(bp);
+		if (!rc)
+			memcpy(&bp->eth_dev->data->mac_addrs[0], bp->mac_addr,
+			       RTE_ETHER_ADDR_LEN);
+		return rc;
+	}
+
+	/* Copy the permanent MAC from the FUNC_QCAPS response */
+	memcpy(bp->mac_addr, bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN);
+	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
+
+	return rc;
+}
+
+static int bnxt_restore_dflt_mac(struct bnxt *bp)
+{
+	int rc = 0;
+
+	/* MAC is already configured in FW */
+	if (!bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN))
+		return 0;
+
+	/* Restore the old MAC configured */
+	rc = bnxt_hwrm_set_mac(bp);
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
+
+	return rc;
+}
+
+static void bnxt_config_vf_req_fwd(struct bnxt *bp)
+{
+	if (!BNXT_PF(bp))
+		return;
+
+#define ALLOW_FUNC(x)	\
+	{ \
+		uint32_t arg = (x); \
+		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
+		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
+	}
+
+	/* Forward all requests if firmware is new enough */
+	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
+	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
+	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
+		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
+	} else {
+		PMD_DRV_LOG(WARNING,
+			    "Firmware too old for VF mailbox functionality\n");
+		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
+	}
+
+	/*
+	 * The following are used for driver cleanup. If we disallow these,
+	 * VF drivers can't clean up cleanly.
+	 */
+	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
+	ALLOW_FUNC(HWRM_VNIC_FREE);
+	ALLOW_FUNC(HWRM_RING_FREE);
+	ALLOW_FUNC(HWRM_RING_GRP_FREE);
+	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
+	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
+	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
+	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
+	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
+}
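/*
 * A worked example (illustrative, not part of the patch) of the bitmap
 * arithmetic behind ALLOW_FUNC() above. vf_req_fwd is an array of 32-bit
 * words indexed by HWRM command ID: bit (id & 0x1f) of word (id >> 5).
 * As the surrounding code suggests, a set bit means the VF's request is
 * forwarded to the PF driver for handling, and clearing it lets the command
 * pass straight to firmware. The fw_ver test uses the packed
 * (major << 24) | (minor << 16) | (build << 8) encoding, so e.g. FW 20.6.100
 * compares as (20 << 24) | (6 << 16) | (100 << 8).
 */
#include <stdint.h>

static inline void allow_cmd(uint32_t *req_fwd, uint32_t cmd_id)
{
	/* Clear the per-command forwarding bit (endianness ignored here). */
	req_fwd[cmd_id >> 5] &= ~(UINT32_C(1) << (cmd_id & 0x1f));
}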
+
+static int bnxt_init_fw(struct bnxt *bp)
+{
+	uint16_t mtu;
+	int rc = 0;
+
+	rc = bnxt_hwrm_ver_get(bp);
+	if (rc)
+		return rc;
+
+	rc = bnxt_hwrm_func_reset(bp);
+	if (rc)
+		return -EIO;
+
+	rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
+	if (rc)
+		return rc;
+
+	rc = bnxt_hwrm_queue_qportcfg(bp);
+	if (rc)
+		return rc;
+
+	/* Get the MAX capabilities for this function */
+	rc = bnxt_hwrm_func_qcaps(bp);
+	if (rc)
+		return rc;
+
+	rc = bnxt_hwrm_func_qcfg(bp, &mtu);
+	if (rc)
+		return rc;
+
+	/* Get the adapter error recovery support info */
+	rc = bnxt_hwrm_error_recovery_qcfg(bp);
+	if (rc)
+		bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+
+	if (mtu >= RTE_ETHER_MIN_MTU && mtu <= BNXT_MAX_MTU &&
+	    mtu != bp->eth_dev->data->mtu)
+		bp->eth_dev->data->mtu = mtu;
+
+	bnxt_hwrm_port_led_qcaps(bp);
+
+	return 0;
+}
+
+static int
+bnxt_init_locks(struct bnxt *bp)
+{
+	int err;
+
+	err = pthread_mutex_init(&bp->flow_lock, NULL);
+	if (err)
+		PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
+	return err;
+}
+
+static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
+{
+	int rc;
+
+	rc = bnxt_init_fw(bp);
+	if (rc)
+		return rc;
+
+	if (!reconfig_dev) {
+		rc = bnxt_setup_mac_addr(bp->eth_dev);
+		if (rc)
+			return rc;
+	} else {
+		rc = bnxt_restore_dflt_mac(bp);
+		if (rc)
+			return rc;
+	}
+
+	bnxt_config_vf_req_fwd(bp);
+
+	rc = bnxt_hwrm_func_driver_register(bp);
+	if (rc) {
+		PMD_DRV_LOG(ERR, "Failed to register driver\n");
+		return -EBUSY;
+	}
+
+	if (BNXT_PF(bp)) {
+		if (bp->pdev->max_vfs) {
+			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
+			if (rc) {
+				PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
+				return rc;
+			}
+		} else {
+			rc = bnxt_hwrm_allocate_pf_only(bp);
+			if (rc) {
+				PMD_DRV_LOG(ERR,
+					    "Failed to allocate PF resources\n");
+				return rc;
+			}
+		}
+	}
+
+	rc = bnxt_alloc_mem(bp, reconfig_dev);
+	if (rc)
+		return rc;
+
+	rc = bnxt_setup_int(bp);
+	if (rc)
+		return rc;
+
+	bnxt_init_nic(bp);
+
+	rc = bnxt_request_int(bp);
+	if (rc)
+		return rc;
+
+	rc = bnxt_init_locks(bp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+static int
+bnxt_dev_init(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	static int version_printed;
+	struct bnxt *bp;
+	int rc;
+
+	if (version_printed++ == 0)
+		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
+
+	eth_dev->dev_ops = &bnxt_dev_ops;
+	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
+	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
+
+	/*
+	 * For secondary processes, we don't initialise any further as the
+	 * primary has already done this work.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+	bp = eth_dev->data->dev_private;
+
+	bp->dev_stopped = 1;
+
+	if (bnxt_vf_pciid(pci_dev->id.device_id))
+		bp->flags |= BNXT_FLAG_VF;
+
+	if (pci_dev->id.device_id == BROADCOM_DEV_ID_57508 ||
+	    pci_dev->id.device_id == BROADCOM_DEV_ID_57504 ||
+	    pci_dev->id.device_id == BROADCOM_DEV_ID_57502 ||
+	    pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF1 ||
+	    pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF2)
+		bp->flags |= BNXT_FLAG_THOR_CHIP;
+
+	if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
+	    pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
+	    pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
+	    pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
+		bp->flags |= BNXT_FLAG_STINGRAY;
+
+	rc = bnxt_init_board(eth_dev);
+	if (rc) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to initialize board rc: %x\n", rc);
+		return rc;
+	}
+
+	rc = bnxt_alloc_hwrm_resources(bp);
+	if (rc) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to allocate hwrm resource rc: %x\n", rc);
+		goto error_free;
+	}
+	rc = bnxt_init_resources(bp, false);
+	if (rc)
+		goto error_free;
+
+	rc = bnxt_alloc_stats_mem(bp);
+	if (rc)
+		goto error_free;
+
+	PMD_DRV_LOG(INFO,
+		    DRV_MODULE_NAME " found at mem %" PRIX64 ", node addr %pM\n",
+		    pci_dev->mem_resource[0].phys_addr,
+		    pci_dev->mem_resource[0].addr);
+
+	return 0;
+
+error_free:
+	bnxt_dev_uninit(eth_dev);
+	return rc;
+}
+
+static void
+bnxt_uninit_locks(struct bnxt *bp)
+{
+	pthread_mutex_destroy(&bp->flow_lock);
+}
+
+static int
+bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
+{
+	int rc;
+
+	bnxt_free_int(bp);
+	bnxt_free_mem(bp, reconfig_dev);
+	bnxt_hwrm_func_buf_unrgtr(bp);
+	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
+	bp->flags &= ~BNXT_FLAG_REGISTERED;
+	bnxt_free_ctx_mem(bp);
+	if (!reconfig_dev) {
+		bnxt_free_hwrm_resources(bp);
+
+		if (bp->recovery_info != NULL) {
+			rte_free(bp->recovery_info);
+			bp->recovery_info = NULL;
+		}
+	}
+
+	rte_free(bp->ptp_cfg);
+	bp->ptp_cfg = NULL;
+	return rc;
+}
+
+static int
+bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = eth_dev->data->dev_private;
+	int rc;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -EPERM;
+
+	PMD_DRV_LOG(DEBUG, "Calling device uninit\n");
+
+	rc = bnxt_uninit_resources(bp, false);
+
+	if (bp->grp_info != NULL) {
+		rte_free(bp->grp_info);
+		bp->grp_info = NULL;
+	}
+
+	if (bp->tx_mem_zone) {
+		rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
+		bp->tx_mem_zone = NULL;
+	}
+
+	if (bp->rx_mem_zone) {
+		rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
+		bp->rx_mem_zone = NULL;
+	}
+
+	if (bp->dev_stopped == 0)
+		bnxt_dev_close_op(eth_dev);
+	if (bp->pf.vf_info)
+		rte_free(bp->pf.vf_info);
+	eth_dev->dev_ops = NULL;
+	eth_dev->rx_pkt_burst = NULL;
+	eth_dev->tx_pkt_burst = NULL;
+
+	bnxt_uninit_locks(bp);
+
+	return rc;
+}
+
+static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
+					     bnxt_dev_init);
+}
+
+static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
+{
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		return rte_eth_dev_pci_generic_remove(pci_dev,
+						      bnxt_dev_uninit);
+	else
+		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+static struct rte_pci_driver bnxt_rte_pmd = {
+	.id_table = bnxt_pci_id_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.probe = bnxt_pci_probe,
+	.remove = bnxt_pci_remove,
+};
+
-RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd.pci_drv);
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
+{
+	if (strcmp(dev->device->driver->name, drv->driver.name))
+		return false;
+
+	return true;
+}
+
+bool is_bnxt_supported(struct rte_eth_dev *dev)
+{
+	return is_device_supported(dev, &bnxt_rte_pmd);
+}
+
+RTE_INIT(bnxt_init_log)
+{
+	bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
+	if (bnxt_logtype_driver >= 0)
+		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
+}
+
+RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");