X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fbnxt%2Fbnxt_ethdev.c;h=0e893cc95686e7afe1e213d73c7453844d06705d;hb=cc5e26b8ef98f6e10206245ab0ad578511ae5ed2;hp=5985963bf72e8d9fdbef54d79ef49b5e248087d4;hpb=f35eaaca5f5f09b48f5d96adbaccc4484aa6e058;p=dpdk.git diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index 5985963bf7..0e893cc956 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -151,6 +151,7 @@ static const struct rte_pci_id bnxt_pci_id_map[] = { DEV_TX_OFFLOAD_GRE_TNL_TSO | \ DEV_TX_OFFLOAD_IPIP_TNL_TSO | \ DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \ + DEV_TX_OFFLOAD_QINQ_INSERT | \ DEV_TX_OFFLOAD_MULTI_SEGS) #define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \ @@ -161,6 +162,7 @@ static const struct rte_pci_id bnxt_pci_id_map[] = { DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \ DEV_RX_OFFLOAD_JUMBO_FRAME | \ DEV_RX_OFFLOAD_KEEP_CRC | \ + DEV_RX_OFFLOAD_VLAN_EXTEND | \ DEV_RX_OFFLOAD_TCP_LRO) static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); @@ -187,7 +189,7 @@ int is_bnxt_in_error(struct bnxt *bp) * High level utility functions */ -static uint16_t bnxt_rss_ctxts(const struct bnxt *bp) +uint16_t bnxt_rss_ctxts(const struct bnxt *bp) { if (!BNXT_CHIP_THOR(bp)) return 1; @@ -316,17 +318,10 @@ static int bnxt_init_chip(struct bnxt *bp) for (i = 0; i < bp->nr_vnics; i++) { struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; - uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps; - vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0); - if (!vnic->fw_grp_ids) { - PMD_DRV_LOG(ERR, - "Failed to alloc %d bytes for group ids\n", - size); - rc = -ENOMEM; + rc = bnxt_vnic_grp_alloc(bp, vnic); + if (rc) goto err_out; - } - memset(vnic->fw_grp_ids, -1, size); PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", i, vnic, vnic->fw_grp_ids); @@ -382,7 +377,7 @@ static int bnxt_init_chip(struct bnxt *bp) goto err_out; } - for (j = 0; j < bp->rx_nr_rings; j++) { + for (j = 0; j < bp->rx_num_qs_per_vnic; j++) { rxq = bp->eth_dev->data->rx_queues[j]; PMD_DRV_LOG(DEBUG, @@ -1006,6 +1001,53 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, } } +static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic, + struct rte_ether_addr *mac_addr, uint32_t index) +{ + struct bnxt_filter_info *filter; + int rc = 0; + + filter = STAILQ_FIRST(&vnic->filter); + /* During bnxt_mac_addr_add_op, default MAC is + * already programmed, so skip it. But, when + * hw-vlan-filter is turned OFF from ON, default + * MAC filter should be restored + */ + if (filter->dflt) + return 0; + + filter = bnxt_alloc_filter(bp); + if (!filter) { + PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); + return -ENODEV; + } + + filter->mac_index = index; + /* bnxt_alloc_filter copies default MAC to filter->l2_addr. 
So, + * if the MAC that's been programmed now is a different one, then, + * copy that addr to filter->l2_addr + */ + if (mac_addr) + memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN); + filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; + + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); + if (!rc) { + if (filter->mac_index == 0) { + filter->dflt = true; + STAILQ_INSERT_HEAD(&vnic->filter, filter, next); + } else { + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + } + } else { + filter->mac_index = INVALID_MAC_INDEX; + memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN); + bnxt_free_filter(bp, filter); + } + + return rc; +} + static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool) @@ -1036,23 +1078,8 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, return 0; } } - filter = bnxt_alloc_filter(bp); - if (!filter) { - PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); - return -ENODEV; - } - - filter->mac_index = index; - memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN); - rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); - if (!rc) { - STAILQ_INSERT_TAIL(&vnic->filter, filter, next); - } else { - filter->mac_index = INVALID_MAC_INDEX; - memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN); - bnxt_free_filter(bp, filter); - } + rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index); return rc; } @@ -1116,7 +1143,7 @@ static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) if (bp->vnic_info == NULL) return 0; - vnic = &bp->vnic_info[0]; + vnic = BNXT_GET_DEFAULT_VNIC(bp); old_flags = vnic->flags; vnic->flags |= BNXT_VNIC_INFO_PROMISC; @@ -1141,7 +1168,7 @@ static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) if (bp->vnic_info == NULL) return 0; - vnic = &bp->vnic_info[0]; + vnic = BNXT_GET_DEFAULT_VNIC(bp); old_flags = vnic->flags; vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; @@ -1166,7 +1193,7 @@ static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) if (bp->vnic_info == NULL) return 0; - vnic = &bp->vnic_info[0]; + vnic = BNXT_GET_DEFAULT_VNIC(bp); old_flags = vnic->flags; vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; @@ -1191,7 +1218,7 @@ static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) if (bp->vnic_info == NULL) return 0; - vnic = &bp->vnic_info[0]; + vnic = BNXT_GET_DEFAULT_VNIC(bp); old_flags = vnic->flags; vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; @@ -1351,8 +1378,6 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, struct bnxt *bp = eth_dev->data->dev_private; struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct bnxt_vnic_info *vnic; - uint16_t hash_type = 0; - unsigned int i; int rc; rc = is_bnxt_in_error(bp); @@ -1374,35 +1399,26 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, bp->flags |= BNXT_FLAG_UPDATE_HASH; memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf)); - if (rss_conf->rss_hf & ETH_RSS_IPV4) - hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; - if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) - hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; - if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) - hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; - if (rss_conf->rss_hf & ETH_RSS_IPV6) - hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; - if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) - hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; - if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) - hash_type |= 
HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; - - /* Update the RSS VNIC(s) */ - for (i = 0; i < bp->nr_vnics; i++) { - vnic = &bp->vnic_info[i]; - vnic->hash_type = hash_type; + /* Update the default RSS VNIC(s) */ + vnic = &bp->vnic_info[0]; + vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); - /* - * Use the supplied key if the key length is - * acceptable and the rss_key is not NULL - */ - if (rss_conf->rss_key && - rss_conf->rss_key_len <= HW_HASH_KEY_SIZE) - memcpy(vnic->rss_hash_key, rss_conf->rss_key, - rss_conf->rss_key_len); + /* + * If hashkey is not specified, use the previously configured + * hashkey + */ + if (!rss_conf->rss_key) + goto rss_config; - bnxt_hwrm_vnic_rss_cfg(bp, vnic); + if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { + PMD_DRV_LOG(ERR, + "Invalid hashkey length, should be 16 bytes\n"); + return -EINVAL; } + memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); + +rss_config: + bnxt_hwrm_vnic_rss_cfg(bp, vnic); return 0; } @@ -1698,9 +1714,10 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) filter = STAILQ_FIRST(&vnic->filter); while (filter) { /* Search for this matching MAC+VLAN filter */ - if (filter->enables & chk && filter->l2_ivlan == vlan_id && - !memcmp(filter->l2_addr, - bp->mac_addr, + if ((filter->enables & chk) && + (filter->l2_ivlan == vlan_id && + filter->l2_ivlan_mask != 0) && + !memcmp(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN)) { /* Delete the filter */ rc = bnxt_hwrm_clear_l2_filter(bp, filter); @@ -1741,8 +1758,11 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) filter = STAILQ_FIRST(&vnic->filter); /* Check if the VLAN has already been added */ while (filter) { - if (filter->enables & chk && filter->l2_ivlan == vlan_id && - !memcmp(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN)) + if ((filter->enables & chk) && + (filter->l2_ivlan == vlan_id && + filter->l2_ivlan_mask == 0x0FFF) && + !memcmp(filter->l2_addr, bp->mac_addr, + RTE_ETHER_ADDR_LEN)) return -EEXIST; filter = STAILQ_NEXT(filter, next); @@ -1758,9 +1778,17 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) return -ENOMEM; } /* MAC + VLAN ID filter */ + /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only + * untagged packets are received + * + * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged + * packets and only the programmed vlan's packets are received + */ filter->l2_ivlan = vlan_id; filter->l2_ivlan_mask = 0x0FFF; filter->enables |= en; + filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); if (rc) { /* Free the newly allocated filter as we were @@ -1769,10 +1797,16 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) filter->fw_l2_filter_id = UINT64_MAX; STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next); return rc; + } else { + /* Add this new filter to the list */ + if (vlan_id == 0) { + filter->dflt = true; + STAILQ_INSERT_HEAD(&vnic->filter, filter, next); + } else { + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + } } - /* Add this new filter to the list */ - STAILQ_INSERT_TAIL(&vnic->filter, filter, next); PMD_DRV_LOG(INFO, "Added Vlan filter for %d\n", vlan_id); return rc; @@ -1795,11 +1829,39 @@ static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, return bnxt_del_vlan_filter(bp, vlan_id); } +static int bnxt_del_dflt_mac_filter(struct bnxt *bp, + struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter; + int rc; + + filter = 
STAILQ_FIRST(&vnic->filter); + while (filter) { + if (filter->dflt && + !memcmp(filter->l2_addr, bp->mac_addr, + RTE_ETHER_ADDR_LEN)) { + rc = bnxt_hwrm_clear_l2_filter(bp, filter); + if (rc) + return rc; + filter->dflt = false; + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + STAILQ_INSERT_TAIL(&bp->free_filter_list, + filter, next); + filter->fw_l2_filter_id = -1; + break; + } + filter = STAILQ_NEXT(filter, next); + } + return 0; +} + static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) { struct bnxt *bp = dev->data->dev_private; uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; + struct bnxt_vnic_info *vnic; unsigned int i; int rc; @@ -1807,15 +1869,28 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) if (rc) return rc; - if (mask & ETH_VLAN_FILTER_MASK) { - if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { - /* Remove any VLAN filters programmed */ - for (i = 0; i < 4095; i++) - bnxt_del_vlan_filter(bp, i); - } - PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", - !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); + vnic = BNXT_GET_DEFAULT_VNIC(bp); + if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { + /* Remove any VLAN filters programmed */ + for (i = 0; i < 4095; i++) + bnxt_del_vlan_filter(bp, i); + + rc = bnxt_add_mac_filter(bp, vnic, NULL, 0); + if (rc) + return rc; + } else { + /* Default filter will allow packets that match the + * dest mac. So, it has to be deleted, otherwise, we + * will endup receiving vlan packets for which the + * filter is not programmed, when hw-vlan-filter + * configuration is ON + */ + bnxt_del_dflt_mac_filter(bp, vnic); + /* This filter will allow only untagged packets */ + bnxt_add_vlan_filter(bp, 0); } + PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", + !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); if (mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ @@ -1831,15 +1906,77 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); } - if (mask & ETH_VLAN_EXTEND_MASK) - PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n"); + if (mask & ETH_VLAN_EXTEND_MASK) { + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); + else + PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); + } + + return 0; +} + +static int +bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct bnxt *bp = dev->data->dev_private; + int qinq = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND; + + if (vlan_type != ETH_VLAN_TYPE_INNER && + vlan_type != ETH_VLAN_TYPE_OUTER) { + PMD_DRV_LOG(ERR, + "Unsupported vlan type."); + return -EINVAL; + } + if (!qinq) { + PMD_DRV_LOG(ERR, + "QinQ not enabled. 
Needs to be ON as we can " + "accelerate only outer vlan\n"); + return -EINVAL; + } + + if (vlan_type == ETH_VLAN_TYPE_OUTER) { + switch (tpid) { + case RTE_ETHER_TYPE_QINQ: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; + break; + case RTE_ETHER_TYPE_VLAN: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; + break; + case 0x9100: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; + break; + case 0x9200: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; + break; + case 0x9300: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; + break; + default: + PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); + return -EINVAL; + } + bp->outer_tpid_bd |= tpid; + PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); + } else if (vlan_type == ETH_VLAN_TYPE_INNER) { + PMD_DRV_LOG(ERR, + "Can accelerate only outer vlan in QinQ\n"); + return -EINVAL; + } return 0; } static int bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, - struct rte_ether_addr *addr) + struct rte_ether_addr *addr) { struct bnxt *bp = dev->data->dev_private; /* Default Filter is tied to VNIC 0 */ @@ -1862,16 +1999,20 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, if (filter->mac_index != 0) continue; - memcpy(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN); + memcpy(filter->l2_addr, addr, RTE_ETHER_ADDR_LEN); memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); - filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; + filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; filter->enables |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); - if (rc) + if (rc) { + memcpy(filter->l2_addr, bp->mac_addr, + RTE_ETHER_ADDR_LEN); return rc; + } memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); @@ -1896,7 +2037,7 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, if (rc) return rc; - vnic = &bp->vnic_info[0]; + vnic = BNXT_GET_DEFAULT_VNIC(bp); if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; @@ -1912,6 +2053,10 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, } vnic->mc_addr_cnt = i; + if (vnic->mc_addr_cnt) + vnic->flags |= BNXT_VNIC_INFO_MCAST; + else + vnic->flags &= ~BNXT_VNIC_INFO_MCAST; allmulti: return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); @@ -3534,6 +3679,7 @@ static const struct eth_dev_ops bnxt_dev_ops = { .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, .vlan_filter_set = bnxt_vlan_filter_set_op, .vlan_offload_set = bnxt_vlan_offload_set_op, + .vlan_tpid_set = bnxt_vlan_tpid_set_op, .vlan_pvid_set = bnxt_vlan_pvid_set_op, .mtu_set = bnxt_mtu_set_op, .mac_addr_set = bnxt_set_default_mac_addr_op, @@ -4146,7 +4292,9 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp) if (rc) return rc; - entries = ctx->qp_max_l2_entries; + entries = ctx->qp_max_l2_entries + + ctx->vnic_max_vnic_entries + + ctx->tqm_min_entries_per_ring; entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring, ctx->tqm_max_entries_per_ring); @@ -4392,6 +4540,10 @@ static int bnxt_init_fw(struct bnxt *bp) if (rc) return -EIO; + rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp); + if (rc) + return rc; + rc = bnxt_hwrm_queue_qportcfg(bp); if (rc) return rc; @@ -4419,6 +4571,17 @@ static int bnxt_init_fw(struct bnxt *bp) return 0; } +static int 
+bnxt_init_locks(struct bnxt *bp) +{ + int err; + + err = pthread_mutex_init(&bp->flow_lock, NULL); + if (err) + PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); + return err; +} + static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) { int rc; @@ -4476,6 +4639,10 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) if (rc) return rc; + rc = bnxt_init_locks(bp); + if (rc) + return rc; + return 0; } @@ -4556,6 +4723,12 @@ error_free: return rc; } +static void +bnxt_uninit_locks(struct bnxt *bp) +{ + pthread_mutex_destroy(&bp->flow_lock); +} + static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) { @@ -4617,6 +4790,8 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) eth_dev->rx_pkt_burst = NULL; eth_dev->tx_pkt_burst = NULL; + bnxt_uninit_locks(bp); + return rc; }
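
Usage sketch (editor's addition, not part of the patch): the new vlan_tpid_set handler added above accepts only the outer VLAN type and requires DEV_RX_OFFLOAD_VLAN_EXTEND to be enabled before a TPID can be programmed. The following minimal, hypothetical application-side snippet shows how that path could be exercised through the standard rte_ethdev API; the helper name setup_qinq, the single-queue configure call, and the abbreviated error handling are illustrative assumptions, not part of this change.

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Hypothetical helper: enable QinQ offloads on a port and program the
 * 0x88A8 outer TPID, which bnxt_vlan_tpid_set_op translates into
 * TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8.
 */
static int
setup_qinq(uint16_t port_id, const struct rte_eth_conf *base_conf)
{
	struct rte_eth_conf conf = *base_conf;
	int ret;

	/* VLAN extend must be on; otherwise the PMD rejects the TPID
	 * with "QinQ not enabled".
	 */
	conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
	conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* Only ETH_VLAN_TYPE_OUTER is accelerated; inner TPIDs are
	 * rejected with -EINVAL by the driver.
	 */
	return rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER,
					       RTE_ETHER_TYPE_QINQ);
}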