+ return rc;
+}
+
+static void bnxt_fw_reset_cb(void *arg)
+{
+ struct bnxt *bp = arg;
+ struct bnxt_error_recovery_info *info = bp->recovery_info;
+ int rc = 0;
+
+ /* Only the master function can initiate a FW reset */
+ if (bnxt_is_master_func(bp) &&
+ bnxt_is_recovery_enabled(bp)) {
+ rc = bnxt_fw_reset_all(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
+ return;
+ }
+ }
+
+ /* If the recovery method is ERROR_RECOVERY_CO_CPU, KONG will send an
+ * EXCEPTION_FATAL_ASYNC event to all functions (including the master
+ * function). After receiving this async event, all active drivers
+ * should treat this case as FW-initiated recovery.
+ */
+ if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
+ bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT;
+ bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT;
+
+ /* Schedule device reset and resume to recover from the error */
+ rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
+ (void *)bp);
+ }
+}
+
+/* The driver should poll the FW heartbeat and reset_counter registers at
+ * the frequency advertised by FW in HWRM_ERROR_RECOVERY_QCFG.
+ * When the driver detects a heartbeat stall or a change in reset_counter,
+ * it has to trigger a reset to recover from the error condition.
+ * The "master PF" is the function that has the privilege to initiate
+ * the chimp reset. The master PF is elected by the firmware and is
+ * notified through an async message.
+ */
+static void bnxt_check_fw_health(void *arg)
+{
+ struct bnxt *bp = arg;
+ struct bnxt_error_recovery_info *info = bp->recovery_info;
+ uint32_t val = 0, wait_msec;
+
+ if (!info || !bnxt_is_recovery_enabled(bp) ||
+ is_bnxt_in_error(bp))
+ return;
+
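+ /* FW bumps the heartbeat counter at the advertised interval; a value
+ * unchanged since the last poll means FW has stopped running.
+ */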
+ val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
+ if (val == info->last_heart_beat)
+ goto reset;
+
+ info->last_heart_beat = val;
+
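+ /* A changed reset counter means FW went through a reset on its own;
+ * treat it like a heartbeat failure and recover the driver.
+ */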
+ val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
+ if (val != info->last_reset_counter)
+ goto reset;
+
+ info->last_reset_counter = val;
+
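+ /* FW is healthy; re-arm the health check at the FW-advertised
+ * polling frequency.
+ */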
+ rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq,
+ bnxt_check_fw_health, (void *)bp);
+
+ return;
+reset:
+ /* Stop DMA to/from device */
+ bp->flags |= BNXT_FLAG_FATAL_ERROR;
+ bp->flags |= BNXT_FLAG_FW_RESET;
+
+ PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
+
+ if (bnxt_is_master_func(bp))
+ wait_msec = info->master_func_wait_period;
+ else
+ wait_msec = info->normal_func_wait_period;
+
+ rte_eal_alarm_set(US_PER_MS * wait_msec,
+ bnxt_fw_reset_cb, (void *)bp);
+}
+
+void bnxt_schedule_fw_health_check(struct bnxt *bp)
+{
+ uint32_t polling_freq;
+
+ if (!bnxt_is_recovery_enabled(bp))
+ return;
+
+ if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
+ return;
+
+ polling_freq = bp->recovery_info->driver_polling_freq;
+
+ rte_eal_alarm_set(US_PER_MS * polling_freq,
+ bnxt_check_fw_health, (void *)bp);
+ bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
+}
+
+static void bnxt_cancel_fw_health_check(struct bnxt *bp)
+{
+ if (!bnxt_is_recovery_enabled(bp))
+ return;
+
+ rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
+ bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
+}
+
+static bool bnxt_vf_pciid(uint16_t id)
+{
+ if (id == BROADCOM_DEV_ID_57304_VF ||
+ id == BROADCOM_DEV_ID_57406_VF ||
+ id == BROADCOM_DEV_ID_5731X_VF ||
+ id == BROADCOM_DEV_ID_5741X_VF ||
+ id == BROADCOM_DEV_ID_57414_VF ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||
+ id == BROADCOM_DEV_ID_58802_VF ||
+ id == BROADCOM_DEV_ID_57500_VF1 ||
+ id == BROADCOM_DEV_ID_57500_VF2)
+ return true;
+ return false;
+}
+
+static bool bnxt_thor_device(uint16_t id)
+{
+ if (id == BROADCOM_DEV_ID_57508 ||
+ id == BROADCOM_DEV_ID_57504 ||
+ id == BROADCOM_DEV_ID_57502 ||
+ id == BROADCOM_DEV_ID_57508_MF1 ||
+ id == BROADCOM_DEV_ID_57504_MF1 ||
+ id == BROADCOM_DEV_ID_57502_MF1 ||
+ id == BROADCOM_DEV_ID_57508_MF2 ||
+ id == BROADCOM_DEV_ID_57504_MF2 ||
+ id == BROADCOM_DEV_ID_57502_MF2 ||
+ id == BROADCOM_DEV_ID_57500_VF1 ||
+ id == BROADCOM_DEV_ID_57500_VF2)
+ return true;
+
+ return false;
+}
+
+bool bnxt_stratus_device(struct bnxt *bp)
+{
+ uint16_t id = bp->pdev->id.device_id;
+
+ if (id == BROADCOM_DEV_ID_STRATUS_NIC ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF2)
+ return true;
+ return false;
+}
+
+static int bnxt_init_board(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct bnxt *bp = eth_dev->data->dev_private;
+
+ /* enable device (incl. PCI PM wakeup), and bus-mastering */
+ bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
+ bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
+ if (!bp->bar0 || !bp->doorbell_base) {
+ PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
+ return -ENODEV;
+ }
+
+ bp->eth_dev = eth_dev;
+ bp->pdev = pci_dev;
+
+ return 0;
+}
+
+static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg,
+ uint32_t mem_size,
+ const char *suffix,
+ uint16_t idx)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ const struct rte_memzone *mz = NULL;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ rte_iova_t mz_phys_addr;
+ uint64_t valid_bits = 0;
+ uint32_t sz;
+ int i;
+
+ if (!mem_size)
+ return 0;
+
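+ /* Number of BNXT_PAGE_SIZE pages needed to back mem_size, rounded up */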
+ rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
+ BNXT_PAGE_SIZE;
+ rmem->page_size = BNXT_PAGE_SIZE;
+ rmem->pg_arr = ctx_pg->ctx_pg_arr;
+ rmem->dma_arr = ctx_pg->ctx_dma_arr;
+ rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
+
+ valid_bits = PTU_PTE_VALID;
+
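+ /* Context memory spanning more than one page needs a page table: one
+ * 64-bit PTE per data page, holding its DMA address plus valid bits.
+ */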
+ if (rmem->nr_pages > 1) {
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_ctx_pg_tbl%s_%x_%d",
+ suffix, idx, bp->eth_dev->data->port_id);
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ if (!mz) {
+ mz = rte_memzone_reserve_aligned(mz_name,
+ rmem->nr_pages * 8,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG,
+ BNXT_PAGE_SIZE);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ PMD_DRV_LOG(DEBUG,
+ "physical address same as virtual\n");
+ PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
+ if (mz_phys_addr == RTE_BAD_IOVA) {
+ PMD_DRV_LOG(ERR,
+ "unable to map addr to phys memory\n");
+ return -ENOMEM;
+ }
+ }
+ rte_mem_lock_page(((char *)mz->addr));
+
+ rmem->pg_tbl = mz->addr;
+ rmem->pg_tbl_map = mz_phys_addr;
+ rmem->pg_tbl_mz = mz;
+ }
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
+ suffix, idx, bp->eth_dev->data->port_id);
+ mz = rte_memzone_lookup(mz_name);
+ if (!mz) {
+ mz = rte_memzone_reserve_aligned(mz_name,
+ mem_size,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_1GB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG,
+ BNXT_PAGE_SIZE);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ PMD_DRV_LOG(DEBUG,
+ "Memzone physical address same as virtual.\n");
+ PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
+ for (sz = 0; sz < mem_size; sz += BNXT_PAGE_SIZE)
+ rte_mem_lock_page(((char *)mz->addr) + sz);
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
+ if (mz_phys_addr == RTE_BAD_IOVA) {
+ PMD_DRV_LOG(ERR,
+ "unable to map addr to phys memory\n");
+ return -ENOMEM;
+ }
+ }
+
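+ /* Record the virtual/DMA address of every data page and, when a page
+ * table is in use, write its PTE. Ring-type memory marks the last two
+ * entries with NEXT_TO_LAST/LAST.
+ */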
+ for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
+ rte_mem_lock_page(((char *)mz->addr) + sz);
+ rmem->pg_arr[i] = ((char *)mz->addr) + sz;
+ rmem->dma_arr[i] = mz_phys_addr + sz;
+
+ if (rmem->nr_pages > 1) {
+ if (i == rmem->nr_pages - 2 &&
+ (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+ valid_bits |= PTU_PTE_NEXT_TO_LAST;
+ else if (i == rmem->nr_pages - 1 &&
+ (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+ valid_bits |= PTU_PTE_LAST;
+
+ rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
+ valid_bits);
+ }
+ }
+
+ rmem->mz = mz;
+ if (rmem->vmem_size)
+ rmem->vmem = (void **)mz->addr;
+ rmem->dma_arr[0] = mz_phys_addr;
+ return 0;
+}
+
+static void bnxt_free_ctx_mem(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
+ return;
+
+ bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+ rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
+ rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
+ rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
+ rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
+ rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
+
+ for (i = 0; i < BNXT_MAX_Q; i++) {
+ if (bp->ctx->tqm_mem[i])
+ rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
+ }
+
+ rte_free(bp->ctx);
+ bp->ctx = NULL;
+}
+
+#define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
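+/* e.g. bnxt_roundup(100, 64) == 128 */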
+
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1 : __max2; })
+
+#define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max)
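+/* e.g. clamp_t(uint32_t, 10000, 32, 4096) == 4096 */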
+
+int bnxt_alloc_ctx_mem(struct bnxt *bp)
+{
+ struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_info *ctx;
+ uint32_t mem_size, ena, entries;
+ int i, rc;
+
+ rc = bnxt_hwrm_func_backing_store_qcaps(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
+ return rc;
+ }
+ ctx = bp->ctx;
+ if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
+ return 0;
+
+ ctx_pg = &ctx->qp_mem;
+ ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
+ mem_size = ctx->qp_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->srq_mem;
+ ctx_pg->entries = ctx->srq_max_l2_entries;
+ mem_size = ctx->srq_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->cq_mem;
+ ctx_pg->entries = ctx->cq_max_l2_entries;
+ mem_size = ctx->cq_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->vnic_mem;
+ ctx_pg->entries = ctx->vnic_max_vnic_entries +
+ ctx->vnic_max_ring_table_entries;
+ mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->stat_mem;
+ ctx_pg->entries = ctx->stat_max_entries;
+ mem_size = ctx->stat_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
+ if (rc)
+ return rc;
+
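+ /* Size each TQM ring for the worst-case L2 load: round up to the
+ * FW-required multiple, then clamp to the advertised min/max.
+ */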
+ entries = ctx->qp_max_l2_entries +
+ ctx->vnic_max_vnic_entries +
+ ctx->tqm_min_entries_per_ring;
+ entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
+ entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
+ ctx->tqm_max_entries_per_ring);
+ for (i = 0, ena = 0; i < BNXT_MAX_Q; i++) {
+ ctx_pg = ctx->tqm_mem[i];
+ /* Use the clamped entry count for all TQM rings for now. */
+ ctx_pg->entries = entries;
+ mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
+ if (rc)
+ return rc;
+ ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
+ }
+
+ ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
+ rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+ if (rc)
+ PMD_DRV_LOG(ERR,
+ "Failed to configure context mem: rc = %d\n", rc);
+ else
+ ctx->flags |= BNXT_CTX_FLAG_INITED;
+
+ return rc;
+}
+
+static int bnxt_alloc_stats_mem(struct bnxt *bp)
+{
+ struct rte_pci_device *pci_dev = bp->pdev;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz = NULL;
+ uint32_t total_alloc_len;
+ rte_iova_t mz_phys_addr;
+
+ if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
+ return 0;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "rx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
+ sizeof(struct rx_port_stats_ext) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ PMD_DRV_LOG(DEBUG,
+ "Memzone physical address same as virtual.\n");
+ PMD_DRV_LOG(DEBUG,
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
+ if (mz_phys_addr == RTE_BAD_IOVA) {
+ PMD_DRV_LOG(ERR,
+ "Can't map address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ bp->rx_mem_zone = (const void *)mz;
+ bp->hw_rx_port_stats = mz->addr;
+ bp->hw_rx_port_stats_map = mz_phys_addr;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "tx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
+ sizeof(struct tx_port_stats_ext) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name,
+ total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ PMD_DRV_LOG(DEBUG,
+ "Memzone physical address same as virtual\n");
+ PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
+ if (mz_phys_addr == RTE_BAD_IOVA) {
+ PMD_DRV_LOG(ERR,
+ "Can't map address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ bp->tx_mem_zone = (const void *)mz;
+ bp->hw_tx_port_stats = mz->addr;
+ bp->hw_tx_port_stats_map = mz_phys_addr;
+ bp->flags |= BNXT_FLAG_PORT_STATS;
+
+ /* Set up extended statistics only if FW supports them */
+ if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
+ bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
+ !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
+ return 0;
+
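+ /* Extended stats are carved out of the same memzone, immediately
+ * after the base stats block (covered by the extra bytes reserved
+ * above).
+ */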
+ bp->hw_rx_port_stats_ext = (void *)
+ ((uint8_t *)bp->hw_rx_port_stats +
+ sizeof(struct rx_port_stats));
+ bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
+ sizeof(struct rx_port_stats);
+ bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
+
+ if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
+ bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
+ bp->hw_tx_port_stats_ext = (void *)
+ ((uint8_t *)bp->hw_tx_port_stats +
+ sizeof(struct tx_port_stats));
+ bp->hw_tx_port_stats_ext_map =
+ bp->hw_tx_port_stats_map +
+ sizeof(struct tx_port_stats);
+ bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
+ }
+
+ return 0;
+}
+
+static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ int rc = 0;
+
+ eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
+ RTE_ETHER_ADDR_LEN *
+ bp->max_l2_ctx,
+ 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
+ return -ENOMEM;
+ }
+
+ if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
+ if (BNXT_PF(bp))
+ return -EINVAL;
+
+ /* Generate a random MAC address, if none was assigned by PF */
+ PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
+ bnxt_eth_hw_addr_random(bp->mac_addr);
+ PMD_DRV_LOG(INFO,
+ "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
+ bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
+
+ rc = bnxt_hwrm_set_mac(bp);
+ if (!rc)
+ memcpy(&bp->eth_dev->data->mac_addrs[0], bp->mac_addr,
+ RTE_ETHER_ADDR_LEN);
+ return rc;
+ }
+
+ /* Copy the permanent MAC from the FUNC_QCAPS response */
+ memcpy(bp->mac_addr, bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN);
+ memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
+
+ return rc;
+}
+
+static int bnxt_restore_dflt_mac(struct bnxt *bp)
+{
+ int rc = 0;
+
+ /* MAC is already configured in FW */
+ if (!bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN))
+ return 0;
+
+ /* Restore the previously configured MAC address */
+ rc = bnxt_hwrm_set_mac(bp);
+ if (rc)
+ PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
+
+ return rc;
+}
+
+static void bnxt_config_vf_req_fwd(struct bnxt *bp)
+{
+ if (!BNXT_PF(bp))
+ return;
+
+#define ALLOW_FUNC(x) \
+ { \
+ uint32_t arg = (x); \
+ bp->pf.vf_req_fwd[((arg) >> 5)] &= \
+ ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
+ }
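+ /* Clearing a command's bit lets the VF send it straight to FW; a set
+ * bit makes FW forward that command to the PF driver for vetting.
+ */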
+
+ /* Forward all requests if firmware is new enough */
+ if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
+ (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
+ ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
+ memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
+ } else {
+ PMD_DRV_LOG(WARNING,
+ "Firmware too old for VF mailbox functionality\n");