X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_ethdev.c;h=c6431505383ec7069f024ef8d0902b977ac7bd25;hb=6d13ea8e8e49ab957deae2bba5ecf4a4bfe747d1;hp=41b23cc4f5910620a8619964948a5728590bdbad;hpb=ba09f838464db72d5c6b6589f84f6a465e5594b1;p=dpdk.git diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index 41b23cc4f5..c643150538 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -1,41 +1,13 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
 */
 
 #include <inttypes.h>
 #include <stdbool.h>
 
 #include <rte_dev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_ethdev_pci.h>
 #include <rte_malloc.h>
 #include <rte_cycles.h>
@@ -54,14 +26,17 @@
 #include "bnxt_vnic.h"
 #include "hsi_struct_def_dpdk.h"
 #include "bnxt_nvm_defs.h"
+#include "bnxt_util.h"
 
 #define DRV_MODULE_NAME "bnxt"
 static const char bnxt_version[] =
-	"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
+	"Broadcom NetXtreme driver " DRV_MODULE_NAME;
+int bnxt_logtype_driver;
 
 #define PCI_VENDOR_ID_BROADCOM 0x14E4
 
-#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609
 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
 #define BROADCOM_DEV_ID_57414_VF 0x16c1
 #define BROADCOM_DEV_ID_57301 0x16c8
@@ -96,10 +71,16 @@ static const char bnxt_version[] =
 #define BROADCOM_DEV_ID_57407_MF 0x16ea
 #define BROADCOM_DEV_ID_57414_MF 0x16ec
 #define BROADCOM_DEV_ID_57416_MF 0x16ee
+#define BROADCOM_DEV_ID_58802 0xd802
+#define BROADCOM_DEV_ID_58804 0xd804
+#define BROADCOM_DEV_ID_58808 0x16f0
+#define BROADCOM_DEV_ID_58802_VF 0xd800
 
 static const struct rte_pci_id bnxt_pci_id_map[] = {
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
-			 BROADCOM_DEV_ID_STRATUS_NIC_VF) },
+			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
+			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
@@ -134,6 +115,10 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
@@ -145,8 +130,32 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
 				     ETH_RSS_NONFRAG_IPV6_TCP |	\
 				     ETH_RSS_NONFRAG_IPV6_UDP)
 
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
+				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
+				     DEV_TX_OFFLOAD_TCP_CKSUM | \
+				     DEV_TX_OFFLOAD_UDP_CKSUM | \
+				     DEV_TX_OFFLOAD_TCP_TSO | \
+				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
+				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
+				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
+				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
+				     DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
+				     DEV_RX_OFFLOAD_VLAN_STRIP | \
+				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
+				     DEV_RX_OFFLOAD_UDP_CKSUM | \
+				     DEV_RX_OFFLOAD_TCP_CKSUM | \
+				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
+				     DEV_RX_OFFLOAD_KEEP_CRC | \
+				     DEV_RX_OFFLOAD_TCP_LRO)
+
 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
+static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
+static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
 
 /***********************/
 
@@ -163,23 +172,12 @@ static void bnxt_free_mem(struct bnxt *bp)
 	bnxt_free_stats(bp);
 	bnxt_free_tx_rings(bp);
 	bnxt_free_rx_rings(bp);
-	bnxt_free_def_cp_ring(bp);
 }
 
 static int bnxt_alloc_mem(struct bnxt *bp)
 {
 	int rc;
 
-	/* Default completion ring */
-	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
-	if (rc)
-
goto alloc_mem_err; - - rc = bnxt_alloc_rings(bp, 0, NULL, NULL, - bp->def_cp_ring, "def_cp"); - if (rc) - goto alloc_mem_err; - rc = bnxt_alloc_vnic_mem(bp); if (rc) goto alloc_mem_err; @@ -201,116 +199,146 @@ alloc_mem_err: static int bnxt_init_chip(struct bnxt *bp) { - unsigned int i, rss_idx, fw_idx; + struct bnxt_rx_queue *rxq; struct rte_eth_link new; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint64_t rx_offloads = dev_conf->rxmode.offloads; uint32_t intr_vector = 0; uint32_t queue_id, base = BNXT_MISC_VEC_ID; uint32_t vec = BNXT_MISC_VEC_ID; + unsigned int i, j; int rc; /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); if (bp->eth_dev->data->mtu > ETHER_MTU) { - bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + bp->eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; bp->flags |= BNXT_FLAG_JUMBO; } else { - bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + bp->eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; bp->flags &= ~BNXT_FLAG_JUMBO; } rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); if (rc) { - RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc); + PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc); goto err_out; } rc = bnxt_alloc_hwrm_rings(bp); if (rc) { - RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc); + PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); goto err_out; } rc = bnxt_alloc_all_hwrm_ring_grps(bp); if (rc) { - RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc); + PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); goto err_out; } rc = bnxt_mq_rx_configure(bp); if (rc) { - RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc); + PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); goto err_out; } /* VNIC configuration */ for (i = 0; i < bp->nr_vnics; i++) { + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps; + + vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0); + if (!vnic->fw_grp_ids) { + PMD_DRV_LOG(ERR, + "Failed to alloc %d bytes for group ids\n", + size); + rc = -ENOMEM; + goto err_out; + } + memset(vnic->fw_grp_ids, -1, size); + + PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", + i, vnic, vnic->fw_grp_ids); rc = bnxt_hwrm_vnic_alloc(bp, vnic); if (rc) { - RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n", + PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n", i, rc); goto err_out; } - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic); - if (rc) { - RTE_LOG(ERR, PMD, - "HWRM vnic %d ctx alloc failure rc: %x\n", - i, rc); - goto err_out; + /* Alloc RSS context only if RSS mode is enabled */ + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic); + if (rc) { + PMD_DRV_LOG(ERR, + "HWRM vnic %d ctx alloc failure rc: %x\n", + i, rc); + goto err_out; + } } + /* + * Firmware sets pf pair in default vnic cfg. If the VLAN strip + * setting is not available at this time, it will not be + * configured correctly in the CFA. 
+ */ + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + vnic->vlan_strip = true; + else + vnic->vlan_strip = false; + rc = bnxt_hwrm_vnic_cfg(bp, vnic); if (rc) { - RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n", + PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", i, rc); goto err_out; } rc = bnxt_set_hwrm_vnic_filters(bp, vnic); if (rc) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "HWRM vnic %d filter failure rc: %x\n", i, rc); goto err_out; } - if (vnic->rss_table && vnic->hash_type) { - /* - * Fill the RSS hash & redirection table with - * ring group ids for all VNICs - */ - for (rss_idx = 0, fw_idx = 0; - rss_idx < HW_HASH_INDEX_SIZE; - rss_idx++, fw_idx++) { - if (vnic->fw_grp_ids[fw_idx] == - INVALID_HW_RING_ID) - fw_idx = 0; - vnic->rss_table[rss_idx] = - vnic->fw_grp_ids[fw_idx]; - } - rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); - if (rc) { - RTE_LOG(ERR, PMD, - "HWRM vnic %d set RSS failure rc: %x\n", - i, rc); - goto err_out; - } + + for (j = 0; j < bp->rx_nr_rings; j++) { + rxq = bp->eth_dev->data->rx_queues[j]; + + PMD_DRV_LOG(DEBUG, + "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", + j, rxq->vnic, rxq->vnic->fw_grp_ids); + + if (rxq->rx_deferred_start) + rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; + } + + rc = bnxt_vnic_rss_configure(bp, vnic); + if (rc) { + PMD_DRV_LOG(ERR, + "HWRM vnic set RSS failure rc: %x\n", rc); + goto err_out; } bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); - if (bp->eth_dev->data->dev_conf.rxmode.enable_lro) + if (bp->eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_TCP_LRO) bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1); else bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0); } rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); if (rc) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "HWRM cfa l2 rx mask failure rc: %x\n", rc); goto err_out; } @@ -320,10 +348,9 @@ static int bnxt_init_chip(struct bnxt *bp) !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { intr_vector = bp->eth_dev->data->nb_rx_queues; - RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__, - intr_vector); + PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); if (intr_vector > bp->rx_cp_nr_rings) { - RTE_LOG(ERR, PMD, "At most %d intr queues supported", + PMD_DRV_LOG(ERR, "At most %d intr queues supported", bp->rx_cp_nr_rings); return -ENOTSUP; } @@ -337,13 +364,13 @@ static int bnxt_init_chip(struct bnxt *bp) bp->eth_dev->data->nb_rx_queues * sizeof(int), 0); if (intr_handle->intr_vec == NULL) { - RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues" + PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" " intr_vec", bp->eth_dev->data->nb_rx_queues); return -ENOMEM; } - RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p " + PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p " "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", - __func__, intr_handle->intr_vec, intr_handle->nb_efd, + intr_handle->intr_vec, intr_handle->nb_efd, intr_handle->max_intr); } @@ -359,14 +386,14 @@ static int bnxt_init_chip(struct bnxt *bp) rc = bnxt_get_hwrm_link_config(bp, &new); if (rc) { - RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc); + PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc); goto err_out; } if (!bp->link_info.link_up) { rc = bnxt_set_hwrm_link_config(bp, true); if (rc) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "HWRM link config failure rc: %x\n", rc); goto err_out; } @@ -404,10 +431,6 @@ static int bnxt_init_nic(struct bnxt *bp) bnxt_init_vnics(bp); bnxt_init_filters(bp); - rc = bnxt_init_chip(bp); - if (rc) - return rc; - return 0; 
} @@ -422,8 +445,6 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, uint16_t max_vnics, i, j, vpool, vrxq; unsigned int max_rx_rings; - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - /* MAC Specifics */ dev_info->max_mac_addrs = bp->max_l2_ctx; dev_info->max_hash_mac_addrs = 0; @@ -431,35 +452,24 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, /* PF/VF specifics */ if (BNXT_PF(bp)) dev_info->max_vfs = bp->pdev->max_vfs; - max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx, - RTE_MIN(bp->max_rsscos_ctx, - bp->max_stat_ctx))); + max_rx_rings = RTE_MIN(bp->max_vnics, bp->max_stat_ctx); /* For the sake of symmetry, max_rx_queues = max_tx_queues */ dev_info->max_rx_queues = max_rx_rings; dev_info->max_tx_queues = max_rx_rings; - dev_info->reta_size = bp->max_rsscos_ctx; + dev_info->reta_size = HW_HASH_INDEX_SIZE; dev_info->hash_key_size = 40; max_vnics = bp->max_vnics; /* Fast path specifics */ dev_info->min_rx_bufsize = 1; dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN - + VLAN_TAG_SIZE; - dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; - dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO | - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_TX_OFFLOAD_VXLAN_TNL_TSO | - DEV_TX_OFFLOAD_GRE_TNL_TSO | - DEV_TX_OFFLOAD_IPIP_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO; + + VLAN_TAG_SIZE * 2; + + dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT; + if (bp->flags & BNXT_FLAG_PTP_SUPPORTED) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; + dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT; + dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; /* *INDENT-OFF* */ dev_info->default_rxconf = (struct rte_eth_rxconf) { @@ -469,7 +479,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, .wthresh = 0, }, .rx_free_thresh = 32, - .rx_drop_en = 0, + /* If no descriptors available, pkts are dropped by default */ + .rx_drop_en = 1, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -480,12 +491,14 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, }, .tx_free_thresh = 32, .tx_rs_thresh = 32, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, }; eth_dev->data->dev_conf.intr_conf.lsc = 1; eth_dev->data->dev_conf.intr_conf.rxq = 1; + dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; + dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; + dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; + dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; /* *INDENT-ON* */ @@ -525,20 +538,68 @@ found: static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; + int rc; bp->rx_queues = (void *)eth_dev->data->rx_queues; bp->tx_queues = (void *)eth_dev->data->tx_queues; + bp->tx_nr_rings = eth_dev->data->nb_tx_queues; + bp->rx_nr_rings = eth_dev->data->nb_rx_queues; + + if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { + rc = bnxt_hwrm_check_vf_rings(bp); + if (rc) { + PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); + return -ENOSPC; + } + + rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); + if (rc) { + PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); + return -ENOSPC; + } + } else { + /* legacy driver 
needs to get updated values */
+		rc = bnxt_hwrm_func_qcaps(bp);
+		if (rc) {
+			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
+			return rc;
+		}
+	}
 
 	/* Inherit new configurations */
-	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
-	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
+	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
+	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
+	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
+	    bp->max_cp_rings ||
+	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
+	    bp->max_stat_ctx ||
+	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps ||
+	    (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+	     bp->max_vnics < eth_dev->data->nb_rx_queues)) {
+		PMD_DRV_LOG(ERR,
+			"Insufficient resources to support requested config\n");
+		PMD_DRV_LOG(ERR,
+			"Num Queues Requested: Tx %d, Rx %d\n",
+			eth_dev->data->nb_tx_queues,
+			eth_dev->data->nb_rx_queues);
+		PMD_DRV_LOG(ERR,
+			"MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
+			bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
+			bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
+		return -ENOSPC;
+	}
+
 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
 
-	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		eth_dev->data->mtu =
 			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
-			ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
+			ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE *
+			BNXT_NUM_VLANS;
+		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
+	}
 	return 0;
 }
 
@@ -547,13 +608,13 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
 	struct rte_eth_link *link = &eth_dev->data->dev_link;
 
 	if (link->link_status)
-		RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
+		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			eth_dev->data->port_id,
			(uint32_t)link->link_speed,
			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n")); else - RTE_LOG(INFO, PMD, "Port %d Link Down\n", + PMD_DRV_LOG(INFO, "Port %d Link Down\n", eth_dev->data->port_id); } @@ -566,25 +627,26 @@ static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev) static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; int vlan_mask = 0; int rc; if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); } bp->dev_stopped = 0; - rc = bnxt_init_nic(bp); + rc = bnxt_init_chip(bp); if (rc) goto error; bnxt_link_update_op(eth_dev, 1); - if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) vlan_mask |= ETH_VLAN_FILTER_MASK; - if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip) + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) vlan_mask |= ETH_VLAN_STRIP_MASK; rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); if (rc) @@ -630,13 +692,15 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + bp->flags &= ~BNXT_FLAG_INIT_DONE; if (bp->eth_dev->data->dev_started) { /* TBD: STOP HW queues DMA */ eth_dev->data->dev_link.link_status = 0; } bnxt_set_hwrm_link_config(bp, false); bnxt_hwrm_port_clr_stats(bp); - bp->flags &= ~BNXT_FLAG_INIT_DONE; + bnxt_free_tx_mbufs(bp); + bnxt_free_rx_mbufs(bp); bnxt_shutdown_nic(bp); bp->dev_stopped = 1; } @@ -648,9 +712,6 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) if (bp->dev_stopped == 0) bnxt_dev_stop_op(eth_dev); - bnxt_free_tx_mbufs(bp); - bnxt_free_rx_mbufs(bp); - bnxt_free_mem(bp); if (eth_dev->data->mac_addrs != NULL) { rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; @@ -659,6 +720,8 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) rte_free(bp->grp_info); bp->grp_info = NULL; } + + bnxt_dev_uninit(eth_dev); } static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, @@ -668,66 +731,62 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; struct bnxt_vnic_info *vnic; struct bnxt_filter_info *filter, *temp_filter; - uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS); uint32_t i; /* * Loop through all VNICs from the specified filter flow pools to * remove the corresponding MAC addr filter */ - for (i = 0; i < pool; i++) { + for (i = 0; i < bp->nr_vnics; i++) { if (!(pool_mask & (1ULL << i))) continue; - STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { - filter = STAILQ_FIRST(&vnic->filter); - while (filter) { - temp_filter = STAILQ_NEXT(filter, next); - if (filter->mac_index == index) { - STAILQ_REMOVE(&vnic->filter, filter, - bnxt_filter_info, next); - bnxt_hwrm_clear_l2_filter(bp, filter); - filter->mac_index = INVALID_MAC_INDEX; - memset(&filter->l2_addr, 0, - ETHER_ADDR_LEN); - STAILQ_INSERT_TAIL( - &bp->free_filter_list, - filter, next); - } - filter = temp_filter; + vnic = &bp->vnic_info[i]; + filter = STAILQ_FIRST(&vnic->filter); + while (filter) { + temp_filter = STAILQ_NEXT(filter, next); + if (filter->mac_index == index) { + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + bnxt_hwrm_clear_l2_filter(bp, filter); + filter->mac_index = INVALID_MAC_INDEX; + memset(&filter->l2_addr, 0, ETHER_ADDR_LEN); + STAILQ_INSERT_TAIL(&bp->free_filter_list, + filter, 
next);
 			}
 			filter = temp_filter;
 		}
 	}
 }
 
 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
-				struct ether_addr *mac_addr,
+				struct rte_ether_addr *mac_addr,
 				uint32_t index, uint32_t pool)
 {
 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
-	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
 	struct bnxt_filter_info *filter;
 
-	if (BNXT_VF(bp)) {
-		RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
+	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
 		return -ENOTSUP;
 	}
 
 	if (!vnic) {
-		RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
+		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
 		return -EINVAL;
 	}
 	/* Attach requested MAC address to the new l2_filter */
 	STAILQ_FOREACH(filter, &vnic->filter, next) {
 		if (filter->mac_index == index) {
-			RTE_LOG(ERR, PMD,
+			PMD_DRV_LOG(ERR,
 				"MAC addr already existed for pool %d\n",
 				pool);
-			return -EINVAL;
+			return 0;
 		}
 	}
 	filter = bnxt_alloc_filter(bp);
 	if (!filter) {
-		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
 		return -ENODEV;
 	}
 	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
@@ -750,7 +809,7 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
 	if (rc) {
 		new.link_speed = ETH_LINK_SPEED_100M;
 		new.link_duplex = ETH_LINK_FULL_DUPLEX;
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Failed to retrieve link rc = 0x%x!\n", rc);
 		goto out;
 	}
@@ -766,6 +825,11 @@ out:
 	    new.link_speed != eth_dev->data->dev_link.link_speed) {
 		memcpy(&eth_dev->data->dev_link, &new,
			sizeof(struct rte_eth_link));
+
+		_rte_eth_dev_callback_process(eth_dev,
+					      RTE_ETH_EVENT_INTR_LSC,
+					      NULL);
+
 		bnxt_print_link_info(eth_dev);
 	}
 
@@ -841,18 +905,16 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 
 	if (reta_size != HW_HASH_INDEX_SIZE) {
-		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
 		return -EINVAL;
 	}
 	/* Update the RSS VNIC(s) */
-	for (i = 0; i < MAX_FF_POOLS; i++) {
-		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
-			memcpy(vnic->rss_table, reta_conf, reta_size);
-
-			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
-		}
+	for (i = 0; i < bp->max_vnics; i++) {
+		vnic = &bp->vnic_info[i];
+		memcpy(vnic->rss_table, reta_conf, reta_size);
+		bnxt_hwrm_vnic_rss_cfg(bp, vnic);
 	}
 	return 0;
 }
@@ -873,7 +935,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 
 	if (reta_size != HW_HASH_INDEX_SIZE) {
-		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
 		return -EINVAL;
@@ -896,7 +958,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
 	struct bnxt_vnic_info *vnic;
 	uint16_t hash_type = 0;
-	int i;
+	unsigned int i;
 
 	/*
	 * If RSS enablement were different than dev_configure,
	 */
 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
 		if (!rss_conf->rss_hf)
-			RTE_LOG(ERR, PMD, "Hash type NONE\n");
+			PMD_DRV_LOG(ERR, "Hash type NONE\n");
 	} else {
 		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
 			return -EINVAL;
 	}
@@ -927,21 +989,20 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 
 	/* Update the RSS VNIC(s) */
-	for (i = 0; i < MAX_FF_POOLS; i++) {
-		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
-			vnic->hash_type = hash_type;
-
-			/*
-			 * Use the supplied key if the key length is
-			 * acceptable and the rss_key is not NULL
-			 */
-			if (rss_conf->rss_key &&
-			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
-				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
-				       rss_conf->rss_key_len);
-
-			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
-		}
+	for (i = 0; i < bp->nr_vnics; i++) {
+		vnic = &bp->vnic_info[i];
+		vnic->hash_type = hash_type;
+
+		/*
+		 * Use the supplied key if the key length is
+		 * acceptable and the rss_key is not NULL
+		 */
+		if (rss_conf->rss_key &&
+		    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
+			memcpy(vnic->rss_hash_key, rss_conf->rss_key,
+			       rss_conf->rss_key_len);
+
+		bnxt_hwrm_vnic_rss_cfg(bp, vnic);
 	}
 	return 0;
 }
@@ -993,7 +1054,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
 		}
 		if (hash_types) {
-			RTE_LOG(ERR, PMD,
+			PMD_DRV_LOG(ERR,
 				"Unknown RSS config from firmware (%08x), RSS disabled",
 				vnic->hash_type);
 			return -ENOTSUP;
@@ -1042,7 +1103,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
 
 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
-		RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
+		PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
 		return -ENOTSUP;
 	}
@@ -1102,10 +1163,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 	switch (udp_tunnel->prot_type) {
 	case RTE_TUNNEL_TYPE_VXLAN:
 		if (bp->vxlan_port_cnt) {
-			RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
				udp_tunnel->udp_port);
 			if (bp->vxlan_port != udp_tunnel->udp_port) {
-				RTE_LOG(ERR, PMD, "Only one port allowed\n");
+				PMD_DRV_LOG(ERR, "Only one port allowed\n");
 				return -ENOSPC;
 			}
 			bp->vxlan_port_cnt++;
@@ -1117,10 +1178,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 		break;
 	case RTE_TUNNEL_TYPE_GENEVE:
 		if (bp->geneve_port_cnt) {
-			RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
				udp_tunnel->udp_port);
 			if (bp->geneve_port != udp_tunnel->udp_port) {
-				RTE_LOG(ERR, PMD, "Only one port allowed\n");
+				PMD_DRV_LOG(ERR, "Only one port allowed\n");
 				return -ENOSPC;
 			}
 			bp->geneve_port_cnt++;
@@ -1131,7 +1192,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
 		bp->geneve_port_cnt++;
 		break;
 	default:
-		RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
 		return -ENOTSUP;
 	}
 	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
@@ -1151,11 +1212,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
 	switch (udp_tunnel->prot_type) {
 	case RTE_TUNNEL_TYPE_VXLAN:
 		if (!bp->vxlan_port_cnt) {
-			RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
 			return -EINVAL;
 		}
 		if (bp->vxlan_port != udp_tunnel->udp_port) {
-			RTE_LOG(ERR, PMD, "Req Port: %d. 
Configured port: %d\n", udp_tunnel->udp_port, bp->vxlan_port); return -EINVAL; } @@ -1168,11 +1229,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, break; case RTE_TUNNEL_TYPE_GENEVE: if (!bp->geneve_port_cnt) { - RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n"); + PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); return -EINVAL; } if (bp->geneve_port != udp_tunnel->udp_port) { - RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n", + PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", udp_tunnel->udp_port, bp->geneve_port); return -EINVAL; } @@ -1184,7 +1245,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, port = bp->geneve_fw_dst_port_id; break; default: - RTE_LOG(ERR, PMD, "Tunnel type is not supported\n"); + PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); return -ENOTSUP; } @@ -1218,53 +1279,51 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) * else * VLAN filter doesn't exist, just skip and continue */ - STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { - filter = STAILQ_FIRST(&vnic->filter); - while (filter) { - temp_filter = STAILQ_NEXT(filter, next); - - if (filter->enables & chk && - filter->l2_ovlan == vlan_id) { - /* Must delete the filter */ - STAILQ_REMOVE(&vnic->filter, filter, - bnxt_filter_info, next); - bnxt_hwrm_clear_l2_filter(bp, filter); - STAILQ_INSERT_TAIL( - &bp->free_filter_list, - filter, next); - - /* - * Need to examine to see if the MAC - * filter already existed or not before - * allocating a new one - */ - - new_filter = bnxt_alloc_filter(bp); - if (!new_filter) { - RTE_LOG(ERR, PMD, + vnic = &bp->vnic_info[i]; + filter = STAILQ_FIRST(&vnic->filter); + while (filter) { + temp_filter = STAILQ_NEXT(filter, next); + + if (filter->enables & chk && + filter->l2_ovlan == vlan_id) { + /* Must delete the filter */ + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + bnxt_hwrm_clear_l2_filter(bp, filter); + STAILQ_INSERT_TAIL(&bp->free_filter_list, + filter, next); + + /* + * Need to examine to see if the MAC + * filter already existed or not before + * allocating a new one + */ + + new_filter = bnxt_alloc_filter(bp); + if (!new_filter) { + PMD_DRV_LOG(ERR, "MAC/VLAN filter alloc failed\n"); - rc = -ENOMEM; - goto exit; - } - STAILQ_INSERT_TAIL(&vnic->filter, - new_filter, next); - /* Inherit MAC from previous filter */ - new_filter->mac_index = - filter->mac_index; - memcpy(new_filter->l2_addr, - filter->l2_addr, ETHER_ADDR_LEN); - /* MAC only filter */ - rc = bnxt_hwrm_set_l2_filter(bp, - vnic->fw_vnic_id, - new_filter); - if (rc) - goto exit; - RTE_LOG(INFO, PMD, - "Del Vlan filter for %d\n", - vlan_id); + rc = -ENOMEM; + goto exit; } - filter = temp_filter; + STAILQ_INSERT_TAIL(&vnic->filter, + new_filter, next); + /* Inherit MAC from previous filter */ + new_filter->mac_index = + filter->mac_index; + memcpy(new_filter->l2_addr, filter->l2_addr, + ETHER_ADDR_LEN); + /* MAC only filter */ + rc = bnxt_hwrm_set_l2_filter(bp, + vnic->fw_vnic_id, + new_filter); + if (rc) + goto exit; + PMD_DRV_LOG(INFO, + "Del Vlan filter for %d\n", + vlan_id); } + filter = temp_filter; } } exit: @@ -1277,9 +1336,9 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) struct bnxt_vnic_info *vnic; unsigned int i; int rc = 0; - uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN | - HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK; - uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN; + uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | + 
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; + uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; /* Cycle through all VNICs */ for (i = 0; i < bp->nr_vnics; i++) { @@ -1294,51 +1353,48 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) * Remove the old MAC only filter * Add a new MAC+VLAN filter */ - STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { - filter = STAILQ_FIRST(&vnic->filter); - while (filter) { - temp_filter = STAILQ_NEXT(filter, next); - - if (filter->enables & chk) { - if (filter->l2_ovlan == vlan_id) - goto cont; - } else { - /* Must delete the MAC filter */ - STAILQ_REMOVE(&vnic->filter, filter, - bnxt_filter_info, next); - bnxt_hwrm_clear_l2_filter(bp, filter); - filter->l2_ovlan = 0; - STAILQ_INSERT_TAIL( - &bp->free_filter_list, - filter, next); - } - new_filter = bnxt_alloc_filter(bp); - if (!new_filter) { - RTE_LOG(ERR, PMD, + vnic = &bp->vnic_info[i]; + filter = STAILQ_FIRST(&vnic->filter); + while (filter) { + temp_filter = STAILQ_NEXT(filter, next); + + if (filter->enables & chk) { + if (filter->l2_ivlan == vlan_id) + goto cont; + } else { + /* Must delete the MAC filter */ + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + bnxt_hwrm_clear_l2_filter(bp, filter); + filter->l2_ovlan = 0; + STAILQ_INSERT_TAIL(&bp->free_filter_list, + filter, next); + } + new_filter = bnxt_alloc_filter(bp); + if (!new_filter) { + PMD_DRV_LOG(ERR, "MAC/VLAN filter alloc failed\n"); - rc = -ENOMEM; - goto exit; - } - STAILQ_INSERT_TAIL(&vnic->filter, new_filter, - next); - /* Inherit MAC from the previous filter */ - new_filter->mac_index = filter->mac_index; - memcpy(new_filter->l2_addr, filter->l2_addr, - ETHER_ADDR_LEN); - /* MAC + VLAN ID filter */ - new_filter->l2_ovlan = vlan_id; - new_filter->l2_ovlan_mask = 0xF000; - new_filter->enables |= en; - rc = bnxt_hwrm_set_l2_filter(bp, - vnic->fw_vnic_id, - new_filter); - if (rc) - goto exit; - RTE_LOG(INFO, PMD, - "Added Vlan filter for %d\n", vlan_id); -cont: - filter = temp_filter; + rc = -ENOMEM; + goto exit; } + STAILQ_INSERT_TAIL(&vnic->filter, new_filter, next); + /* Inherit MAC from the previous filter */ + new_filter->mac_index = filter->mac_index; + memcpy(new_filter->l2_addr, filter->l2_addr, + ETHER_ADDR_LEN); + /* MAC + VLAN ID filter */ + new_filter->l2_ivlan = vlan_id; + new_filter->l2_ivlan_mask = 0xF000; + new_filter->enables |= en; + rc = bnxt_hwrm_set_l2_filter(bp, + vnic->fw_vnic_id, + new_filter); + if (rc) + goto exit; + PMD_DRV_LOG(INFO, + "Added Vlan filter for %d\n", vlan_id); +cont: + filter = temp_filter; } } exit: @@ -1346,7 +1402,7 @@ exit: } static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, - uint16_t vlan_id, int on) + uint16_t vlan_id, int on) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; @@ -1361,40 +1417,42 @@ static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) { struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; unsigned int i; if (mask & ETH_VLAN_FILTER_MASK) { - if (!dev->data->dev_conf.rxmode.hw_vlan_filter) { + if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { /* Remove any VLAN filters programmed */ for (i = 0; i < 4095; i++) bnxt_del_vlan_filter(bp, i); } - RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n", - dev->data->dev_conf.rxmode.hw_vlan_filter); + PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", + !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); } if (mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ for (i = 0; i < 
bp->nr_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) vnic->vlan_strip = true; else vnic->vlan_strip = false; bnxt_hwrm_vnic_cfg(bp, vnic); } - RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n", - dev->data->dev_conf.rxmode.hw_vlan_strip); + PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", + !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); } if (mask & ETH_VLAN_EXTEND_MASK) - RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n"); + PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n"); return 0; } -static void -bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) +static int +bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) { struct bnxt *bp = (struct bnxt *)dev->data->dev_private; /* Default Filter is tied to VNIC 0 */ @@ -1402,11 +1460,10 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) struct bnxt_filter_info *filter; int rc; - if (BNXT_VF(bp)) - return; + if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) + return -EPERM; memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr)); - memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN); STAILQ_FOREACH(filter, &vnic->filter, next) { /* Default Filter is at Index 0 */ @@ -1414,7 +1471,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) continue; rc = bnxt_hwrm_clear_l2_filter(bp, filter); if (rc) - break; + return rc; memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN); memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN); filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; @@ -1423,15 +1480,17 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); if (rc) - break; + return rc; filter->mac_index = 0; - RTE_LOG(DEBUG, PMD, "Set MAC addr\n"); + PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); } + + return 0; } static int bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, - struct ether_addr *mc_addr_set, + struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; @@ -1511,7 +1570,6 @@ bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_rs_thresh = 0; - qinfo->conf.txq_flags = txq->txq_flags; qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } @@ -1519,26 +1577,24 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) { struct bnxt *bp = eth_dev->data->dev_private; struct rte_eth_dev_info dev_info; - uint32_t max_dev_mtu; uint32_t rc = 0; uint32_t i; bnxt_dev_info_get_op(eth_dev, &dev_info); - max_dev_mtu = dev_info.max_rx_pktlen - - ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2; - if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) { - RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n", - ETHER_MIN_MTU, max_dev_mtu); + if (new_mtu < ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) { + PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n", + ETHER_MIN_MTU, BNXT_MAX_MTU); return -EINVAL; } - if (new_mtu > ETHER_MTU) { bp->flags |= BNXT_FLAG_JUMBO; - eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + bp->eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; } else { - eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + bp->eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; 
bp->flags &= ~BNXT_FLAG_JUMBO; } @@ -1546,10 +1602,11 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; eth_dev->data->mtu = new_mtu; - RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu); + PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu); for (i = 0; i < bp->nr_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + uint16_t size = 0; vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; @@ -1557,9 +1614,14 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) if (rc) break; - rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); - if (rc) - return rc; + size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); + size -= RTE_PKTMBUF_HEADROOM; + + if (size < new_mtu) { + rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); + if (rc) + return rc; + } } return rc; @@ -1573,7 +1635,7 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) int rc; if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "PVID cannot be modified for this function\n"); return -ENOTSUP; } @@ -1734,21 +1796,21 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp, if (efilter->ether_type == ETHER_TYPE_IPv4 || efilter->ether_type == ETHER_TYPE_IPv6) { - RTE_LOG(ERR, PMD, "invalid ether_type(0x%04x) in" + PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in" " ethertype filter.", efilter->ether_type); *ret = -EINVAL; goto exit; } if (efilter->queue >= bp->rx_nr_rings) { - RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue); + PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); *ret = -EINVAL; goto exit; } - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]); + vnic0 = &bp->vnic_info[0]; + vnic = &bp->vnic_info[efilter->queue]; if (vnic == NULL) { - RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue); + PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); *ret = -EINVAL; goto exit; } @@ -1799,13 +1861,13 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev, return 0; if (arg == NULL) { - RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.", + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", filter_op); return -EINVAL; } - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]); + vnic0 = &bp->vnic_info[0]; + vnic = &bp->vnic_info[efilter->queue]; switch (filter_op) { case RTE_ETH_FILTER_ADD: @@ -1816,7 +1878,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev, bfilter = bnxt_get_unused_filter(bp); if (bfilter == NULL) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "Not enough resources for a new filter.\n"); return -ENOMEM; } @@ -1860,11 +1922,11 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev, next); bnxt_free_filter(bp, filter1); } else if (ret == 0) { - RTE_LOG(ERR, PMD, "No matching filter found\n"); + PMD_DRV_LOG(ERR, "No matching filter found\n"); } break; default: - RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op); + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); ret = -EINVAL; goto error; } @@ -1883,7 +1945,7 @@ parse_ntuple_filter(struct bnxt *bp, uint32_t en = 0; if (nfilter->queue >= bp->rx_nr_rings) { - RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue); + PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue); return -EINVAL; } @@ -1895,7 +1957,7 @@ parse_ntuple_filter(struct bnxt *bp, NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; break; default: - RTE_LOG(ERR, PMD, "invalid dst_port mask."); + 
PMD_DRV_LOG(ERR, "invalid dst_port mask."); return -EINVAL; } @@ -1913,7 +1975,7 @@ parse_ntuple_filter(struct bnxt *bp, en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; break; default: - RTE_LOG(ERR, PMD, "invalid protocol mask."); + PMD_DRV_LOG(ERR, "invalid protocol mask."); return -EINVAL; } @@ -1925,7 +1987,7 @@ parse_ntuple_filter(struct bnxt *bp, NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; break; default: - RTE_LOG(ERR, PMD, "invalid dst_ip mask."); + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); return -EINVAL; } @@ -1937,7 +1999,7 @@ parse_ntuple_filter(struct bnxt *bp, NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; break; default: - RTE_LOG(ERR, PMD, "invalid src_ip mask."); + PMD_DRV_LOG(ERR, "invalid src_ip mask."); return -EINVAL; } @@ -1949,7 +2011,7 @@ parse_ntuple_filter(struct bnxt *bp, NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; break; default: - RTE_LOG(ERR, PMD, "invalid src_port mask."); + PMD_DRV_LOG(ERR, "invalid src_port mask."); return -EINVAL; } @@ -2002,18 +2064,18 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp, int ret; if (nfilter->flags != RTE_5TUPLE_FLAGS) { - RTE_LOG(ERR, PMD, "only 5tuple is supported."); + PMD_DRV_LOG(ERR, "only 5tuple is supported."); return -EINVAL; } if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { - RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n"); + PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n"); return -EINVAL; } bfilter = bnxt_get_unused_filter(bp); if (bfilter == NULL) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "Not enough resources for a new filter.\n"); return -ENOMEM; } @@ -2021,8 +2083,8 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp, if (ret < 0) goto free_filter; - vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]); - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + vnic = &bp->vnic_info[nfilter->queue]; + vnic0 = &bp->vnic_info[0]; filter1 = STAILQ_FIRST(&vnic0->filter); if (filter1 == NULL) { ret = -1; @@ -2040,7 +2102,7 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp, if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && bfilter->dst_id == mfilter->dst_id) { - RTE_LOG(ERR, PMD, "filter exists.\n"); + PMD_DRV_LOG(ERR, "filter exists.\n"); ret = -EEXIST; goto free_filter; } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && @@ -2049,12 +2111,12 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp, ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter); STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next); STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next); - RTE_LOG(ERR, PMD, "filter with matching pattern exists.\n"); - RTE_LOG(ERR, PMD, " Updated it to the new destination queue\n"); + PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n"); + PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n"); goto free_filter; } if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) { - RTE_LOG(ERR, PMD, "filter doesn't exist."); + PMD_DRV_LOG(ERR, "filter doesn't exist."); ret = -ENOENT; goto free_filter; } @@ -2099,7 +2161,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev, return 0; if (arg == NULL) { - RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.", + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", filter_op); return -EINVAL; } @@ -2116,7 +2178,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev, filter_op); break; default: - RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op); + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); ret = -EINVAL; break; } @@ -2315,10 +2377,10 @@ bnxt_parse_fdir_filter(struct bnxt *bp, return -EINVAL; } - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - vnic 
= STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]); + vnic0 = &bp->vnic_info[0]; + vnic = &bp->vnic_info[fdir->action.rx_queue]; if (vnic == NULL) { - RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue); + PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue); return -EINVAL; } @@ -2354,7 +2416,8 @@ bnxt_parse_fdir_filter(struct bnxt *bp, } static struct bnxt_filter_info * -bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf) +bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf, + struct bnxt_vnic_info **mvnic) { struct bnxt_filter_info *mf = NULL; int i; @@ -2392,8 +2455,11 @@ bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf) !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, sizeof(nf->dst_ipaddr)) && !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, - sizeof(nf->dst_ipaddr_mask))) + sizeof(nf->dst_ipaddr_mask))) { + if (mvnic) + *mvnic = vnic; return mf; + } } } return NULL; @@ -2407,7 +2473,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev, struct bnxt *bp = (struct bnxt *)dev->data->dev_private; struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; struct bnxt_filter_info *filter, *match; - struct bnxt_vnic_info *vnic; + struct bnxt_vnic_info *vnic, *mvnic; int ret = 0, i; if (filter_op == RTE_ETH_FILTER_NOP) @@ -2422,7 +2488,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev, /* FALLTHROUGH */ filter = bnxt_get_unused_filter(bp); if (filter == NULL) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n"); return -ENOMEM; } @@ -2432,24 +2498,38 @@ bnxt_fdir_filter(struct rte_eth_dev *dev, goto free_filter; filter->filter_type = HWRM_CFA_NTUPLE_FILTER; - match = bnxt_match_fdir(bp, filter); + if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) + vnic = &bp->vnic_info[0]; + else + vnic = &bp->vnic_info[fdir->action.rx_queue]; + + match = bnxt_match_fdir(bp, filter, &mvnic); if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) { - RTE_LOG(ERR, PMD, "Flow already exists.\n"); - ret = -EEXIST; - goto free_filter; + if (match->dst_id == vnic->fw_vnic_id) { + PMD_DRV_LOG(ERR, "Flow already exists.\n"); + ret = -EEXIST; + goto free_filter; + } else { + match->dst_id = vnic->fw_vnic_id; + ret = bnxt_hwrm_set_ntuple_filter(bp, + match->dst_id, + match); + STAILQ_REMOVE(&mvnic->filter, match, + bnxt_filter_info, next); + STAILQ_INSERT_TAIL(&vnic->filter, match, next); + PMD_DRV_LOG(ERR, + "Filter with matching pattern exist\n"); + PMD_DRV_LOG(ERR, + "Updated it to new destination q\n"); + goto free_filter; + } } if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) { - RTE_LOG(ERR, PMD, "Flow does not exist.\n"); + PMD_DRV_LOG(ERR, "Flow does not exist.\n"); ret = -ENOENT; goto free_filter; } - if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) - vnic = STAILQ_FIRST(&bp->ff_pool[0]); - else - vnic = - STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]); - if (filter_op == RTE_ETH_FILTER_ADD) { ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, @@ -2485,11 +2565,10 @@ bnxt_fdir_filter(struct rte_eth_dev *dev, case RTE_ETH_FILTER_UPDATE: case RTE_ETH_FILTER_STATS: case RTE_ETH_FILTER_INFO: - /* FALLTHROUGH */ - RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op); + PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op); break; default: - RTE_LOG(ERR, PMD, "unknown operation %u", filter_op); + PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); ret = -EINVAL; break; } @@ -2510,7 +2589,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused, switch (filter_type) { case RTE_ETH_FILTER_TUNNEL: - 
RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "filter type: %d: To be implemented\n", filter_type); break; case RTE_ETH_FILTER_FDIR: @@ -2528,7 +2607,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused, *(const void **)arg = &bnxt_flow_ops; break; default: - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "Filter type (%d) not supported", filter_type); ret = -EINVAL; break; @@ -2571,7 +2650,7 @@ static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, return -ERANGE; } win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; - rte_cpu_to_le_32(rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off)); + rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); return 0; } @@ -2602,10 +2681,10 @@ static int bnxt_map_ptp_regs(struct bnxt *bp) static void bnxt_unmap_ptp_regs(struct bnxt *bp) { - rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 + - BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16)); - rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 + - BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20)); + rte_write32(0, (uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); + rte_write32(0, (uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); } static uint64_t bnxt_cc_read(struct bnxt *bp) @@ -2655,8 +2734,8 @@ static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) return -EAGAIN; port_id = pf->port_id; - rte_cpu_to_le_32(rte_write32(1 << port_id, (uint8_t *)bp->bar0 + - ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV])); + rte_write32(1 << port_id, (uint8_t *)bp->bar0 + + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); @@ -2822,8 +2901,8 @@ bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) uint32_t dir_entries; uint32_t entry_length; - RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n", - __func__, bp->pdev->addr.domain, bp->pdev->addr.bus, + PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n", + bp->pdev->addr.domain, bp->pdev->addr.bus, bp->pdev->addr.devid, bp->pdev->addr.function); rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); @@ -2841,8 +2920,8 @@ bnxt_get_eeprom_op(struct rte_eth_dev *dev, uint32_t index; uint32_t offset; - RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d " - "len = %d\n", __func__, bp->pdev->addr.domain, + PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d " + "len = %d\n", bp->pdev->addr.domain, bp->pdev->addr.bus, bp->pdev->addr.devid, bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); @@ -2872,6 +2951,7 @@ static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) case BNX_DIR_TYPE_KONG_PATCH: case BNX_DIR_TYPE_BONO_FW: case BNX_DIR_TYPE_BONO_PATCH: + /* FALLTHROUGH */ return true; } @@ -2890,6 +2970,7 @@ static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) case BNX_DIR_TYPE_ISCSI_BOOT: case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: + /* FALLTHROUGH */ return true; } @@ -2910,13 +2991,13 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev, uint8_t index, dir_op; uint16_t type, ext, ordinal, attr; - RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d " - "len = %d\n", __func__, bp->pdev->addr.domain, + PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d " + "len = %d\n", bp->pdev->addr.domain, bp->pdev->addr.bus, bp->pdev->addr.devid, bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length); if (!BNXT_PF(bp)) { - RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n"); + PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); return -EINVAL; } @@ -3003,6 
+3084,10 @@ static const struct eth_dev_ops bnxt_dev_ops = {
 	.rx_queue_count = bnxt_rx_queue_count_op,
 	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
 	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
+	.rx_queue_start = bnxt_rx_queue_start,
+	.rx_queue_stop = bnxt_rx_queue_stop,
+	.tx_queue_start = bnxt_tx_queue_start,
+	.tx_queue_stop = bnxt_tx_queue_stop,
 	.filter_ctrl = bnxt_filter_ctrl_op,
 	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
 	.get_eeprom_length = bnxt_get_eeprom_length_op,
@@ -3024,7 +3109,20 @@ static bool bnxt_vf_pciid(uint16_t id)
 	    id == BROADCOM_DEV_ID_5731X_VF ||
 	    id == BROADCOM_DEV_ID_5741X_VF ||
 	    id == BROADCOM_DEV_ID_57414_VF ||
-	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
+	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||
+	    id == BROADCOM_DEV_ID_58802_VF)
 		return true;
 	return false;
 }
+
+bool bnxt_stratus_device(struct bnxt *bp)
+{
+	uint16_t id = bp->pdev->id.device_id;
+
+	if (id == BROADCOM_DEV_ID_STRATUS_NIC ||
+	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2)
+		return true;
+	return false;
+}
@@ -3037,7 +3135,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
 
 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
 	if (!pci_dev->mem_resource[0].addr) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Cannot find PCI device base address, aborting\n");
 		rc = -ENODEV;
 		goto init_err_disable;
@@ -3048,22 +3146,33 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
 
 	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
 	if (!bp->bar0) {
-		RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
+		PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
 		rc = -ENOMEM;
 		goto init_err_release;
 	}
+
+	if (!pci_dev->mem_resource[2].addr) {
+		PMD_DRV_LOG(ERR,
+			    "Cannot find PCI device BAR 2 address, aborting\n");
+		rc = -ENODEV;
+		goto init_err_release;
+	} else {
+		bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
+	}
+
 	return 0;
 
 init_err_release:
 	if (bp->bar0)
 		bp->bar0 = NULL;
+	if (bp->doorbell_base)
+		bp->doorbell_base = NULL;
 
 init_err_disable:
 
 	return rc;
 }
 
-static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
 
 #define ALLOW_FUNC(x)	\
 	{ \
@@ -3084,13 +3193,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 	int rc;
 
 	if (version_printed++ == 0)
-		RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
+		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
 	bp = eth_dev->data->dev_private;
 
-	rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
 	bp->dev_stopped = 1;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -3101,18 +3209,18 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 
 	rc = bnxt_init_board(eth_dev);
 	if (rc) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Board initialization failed rc: %x\n", rc);
 		goto error;
 	}
 skip_init:
 	eth_dev->dev_ops = &bnxt_dev_ops;
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
+	if (pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
 			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
 			 pci_dev->addr.bus, pci_dev->addr.devid,
@@ -3120,25 +3228,26 @@ skip_init:
 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
 		mz = rte_memzone_lookup(mz_name);
 		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
-				sizeof(struct rx_port_stats) + 512);
+				sizeof(struct rx_port_stats) +
+				sizeof(struct rx_port_stats_ext) +
+				512);
 		if (!mz) {
 			mz = rte_memzone_reserve(mz_name, total_alloc_len,
-						 SOCKET_ID_ANY,
-						 RTE_MEMZONE_2MB |
-						 RTE_MEMZONE_SIZE_HINT_ONLY);
+					SOCKET_ID_ANY,
+					RTE_MEMZONE_2MB |
+					RTE_MEMZONE_SIZE_HINT_ONLY |
+					RTE_MEMZONE_IOVA_CONTIG);
 			if (mz == NULL)
 				return -ENOMEM;
 		}
 		memset(mz->addr, 0, mz->len);
 		mz_phys_addr = mz->iova;
 		if ((unsigned long)mz->addr == mz_phys_addr) {
-			RTE_LOG(WARNING, PMD,
-				"Memzone physical address same as virtual.\n");
-			RTE_LOG(WARNING, PMD,
-				"Using rte_mem_virt2iova()\n");
+			PMD_DRV_LOG(INFO,
+				    "Memzone physical address same as virtual using rte_mem_virt2iova()\n");
 			mz_phys_addr = rte_mem_virt2iova(mz->addr);
 			if (mz_phys_addr == 0) {
-				RTE_LOG(ERR, PMD,
+				PMD_DRV_LOG(ERR,
 					"unable to map address to physical memory\n");
 				return -ENOMEM;
 			}
@@ -3155,25 +3264,29 @@ skip_init:
 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
 		mz = rte_memzone_lookup(mz_name);
 		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
-				sizeof(struct tx_port_stats) + 512);
+				sizeof(struct tx_port_stats) +
+				sizeof(struct tx_port_stats_ext) +
+				512);
 		if (!mz) {
-			mz = rte_memzone_reserve(mz_name, total_alloc_len,
-						 SOCKET_ID_ANY,
-						 RTE_MEMZONE_2MB |
-						 RTE_MEMZONE_SIZE_HINT_ONLY);
+			mz = rte_memzone_reserve(mz_name,
+					total_alloc_len,
+					SOCKET_ID_ANY,
+					RTE_MEMZONE_2MB |
+					RTE_MEMZONE_SIZE_HINT_ONLY |
+					RTE_MEMZONE_IOVA_CONTIG);
 			if (mz == NULL)
 				return -ENOMEM;
 		}
 		memset(mz->addr, 0, mz->len);
 		mz_phys_addr = mz->iova;
 		if ((unsigned long)mz->addr == mz_phys_addr) {
-			RTE_LOG(WARNING, PMD,
+			PMD_DRV_LOG(WARNING,
 				"Memzone physical address same as virtual.\n");
-			RTE_LOG(WARNING, PMD,
+			PMD_DRV_LOG(WARNING,
 				"Using rte_mem_virt2iova()\n");
 			mz_phys_addr = rte_mem_virt2iova(mz->addr);
 			if (mz_phys_addr == 0) {
-				RTE_LOG(ERR, PMD,
+				PMD_DRV_LOG(ERR,
 					"unable to map address to physical memory\n");
 				return -ENOMEM;
 			}
@@ -3184,11 +3297,33 @@ skip_init:
 		bp->hw_tx_port_stats_map = mz_phys_addr;
 
 		bp->flags |= BNXT_FLAG_PORT_STATS;
+
+		/* Display extended statistics if FW supports it */
+		if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
+		    bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0)
+			goto skip_ext_stats;
+
+		bp->hw_rx_port_stats_ext = (void *)
+			(bp->hw_rx_port_stats + sizeof(struct rx_port_stats));
+		bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
+			sizeof(struct rx_port_stats);
+		bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
+
+
+		if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2) {
+			bp->hw_tx_port_stats_ext = (void *)
+				(bp->hw_tx_port_stats + sizeof(struct tx_port_stats));
+			bp->hw_tx_port_stats_ext_map =
+				bp->hw_tx_port_stats_map +
+				sizeof(struct tx_port_stats);
+			bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
+		}
 	}
 
+skip_ext_stats:
 	rc = bnxt_alloc_hwrm_resources(bp);
 	if (rc) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"hwrm resource allocation failure rc: %x\n", rc);
 		goto error_free;
 	}
@@ -3197,43 +3332,53 @@ skip_init:
 		goto error_free;
 	rc = bnxt_hwrm_queue_qportcfg(bp);
 	if (rc) {
-		RTE_LOG(ERR, PMD, "hwrm queue qportcfg failed\n");
+		PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
 		goto error_free;
 	}
 
	rc = bnxt_hwrm_func_qcfg(bp);
 	if (rc) {
-		RTE_LOG(ERR, PMD, "hwrm func qcfg failed\n");
+		PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
 		goto error_free;
 	}
 
 	/* Get the MAX capabilities for this function */
 	rc = bnxt_hwrm_func_qcaps(bp);
 	if (rc) {
-		RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
+		PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
 		goto error_free;
 	}
 	if (bp->max_tx_rings == 0) {
-		RTE_LOG(ERR, PMD, "No TX rings available!\n");
+		PMD_DRV_LOG(ERR, "No TX rings available!\n");
 		rc = -EBUSY;
 		goto error_free;
 	}
 	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
 					ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Failed to alloc %u bytes needed to store MAC addr tbl",
 			ETHER_ADDR_LEN * bp->max_l2_ctx);
 		rc = -ENOMEM;
 		goto error_free;
 	}
+
+	if (bnxt_check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
+		PMD_DRV_LOG(ERR,
+			    "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
+			    bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
+			    bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
+			    bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
+		rc = -EINVAL;
+		goto error_free;
+	}
 	/* Copy the permanent MAC from the qcap response address now. */
 	memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
 	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
 
 	if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
 		/* 1 ring is for default completion ring */
-		RTE_LOG(ERR, PMD, "Insufficient resource: Ring Group\n");
+		PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
 		rc = -ENOSPC;
 		goto error_free;
 	}
@@ -3241,7 +3386,7 @@ skip_init:
 	bp->grp_info = rte_zmalloc("bnxt_grp_info",
 				sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
 	if (!bp->grp_info) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Failed to alloc %zu bytes to store group info table\n",
 			sizeof(*bp->grp_info) * bp->max_ring_grps);
 		rc = -ENOMEM;
@@ -3254,7 +3399,7 @@ skip_init:
 	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
 		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
 	} else {
-		RTE_LOG(WARNING, PMD,
+		PMD_DRV_LOG(WARNING,
 			"Firmware too old for VF mailbox functionality\n");
 		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
 	}
@@ -3274,20 +3419,20 @@ skip_init:
 	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
 	rc = bnxt_hwrm_func_driver_register(bp);
 	if (rc) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Failed to register driver");
 		rc = -EBUSY;
 		goto error_free;
 	}
 
-	RTE_LOG(INFO, PMD,
+	PMD_DRV_LOG(INFO,
 		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
 		pci_dev->mem_resource[0].phys_addr,
 		pci_dev->mem_resource[0].addr);
 
 	rc = bnxt_hwrm_func_reset(bp);
 	if (rc) {
-		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
+		PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
 		rc = -EIO;
 		goto error_free;
 	}
@@ -3299,13 +3444,13 @@ skip_init:
 		if (bp->pdev->max_vfs) {
 			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
 			if (rc) {
-				RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
+				PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
 				goto error_free;
 			}
 		} else {
 			rc = bnxt_hwrm_allocate_pf_only(bp);
 			if (rc) {
-				RTE_LOG(ERR, PMD,
+				PMD_DRV_LOG(ERR,
 					"Failed to allocate PF resources\n");
 				goto error_free;
 			}
@@ -3326,17 +3471,13 @@ skip_init:
 	if (rc)
 		goto error_free_int;
 
-	rc = bnxt_alloc_def_cp_ring(bp);
-	if (rc)
-		goto error_free_int;
-
 	bnxt_enable_int(bp);
+	bnxt_init_nic(bp);
 
 	return 0;
 
 error_free_int:
 	bnxt_disable_int(bp);
-	bnxt_free_def_cp_ring(bp);
 	bnxt_hwrm_func_buf_unrgtr(bp);
 	bnxt_free_int(bp);
 	bnxt_free_mem(bp);
@@ -3347,28 +3488,35 @@ error:
 }
 
 static int
-bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
+bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
+{
 	struct bnxt *bp = eth_dev->data->dev_private;
 	int rc;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return -EPERM;
 
+	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
 	bnxt_disable_int(bp);
 	bnxt_free_int(bp);
 	bnxt_free_mem(bp);
-	if (eth_dev->data->mac_addrs != NULL) {
-		rte_free(eth_dev->data->mac_addrs);
-		eth_dev->data->mac_addrs = NULL;
-	}
 	if (bp->grp_info != NULL) {
 		rte_free(bp->grp_info);
 		bp->grp_info = NULL;
 	}
 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
 	bnxt_free_hwrm_resources(bp);
-	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
-	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
+
+	if (bp->tx_mem_zone) {
+		rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
+		bp->tx_mem_zone = NULL;
+	}
+
+	if (bp->rx_mem_zone) {
+		rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
+		bp->rx_mem_zone = NULL;
+	}
+
 	if (bp->dev_stopped == 0)
 		bnxt_dev_close_op(eth_dev);
 	if (bp->pf.vf_info)
@@ -3389,13 +3537,17 @@ static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
 {
-	return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		return rte_eth_dev_pci_generic_remove(pci_dev,
+				bnxt_dev_uninit);
+	else
+		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
 }
 
 static struct rte_pci_driver bnxt_rte_pmd = {
 	.id_table = bnxt_pci_id_map,
 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
-		RTE_PCI_DRV_INTR_LSC,
+		RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_IOVA_AS_VA,
 	.probe = bnxt_pci_probe,
 	.remove = bnxt_pci_remove,
 };
@@ -3414,6 +3566,13 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
 	return is_device_supported(dev, &bnxt_rte_pmd);
 }
 
+RTE_INIT(bnxt_init_log)
+{
+	bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
+	if (bnxt_logtype_driver >= 0)
+		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
+}
+
 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");