+ uint64_t valid_bits = 0;
+ uint32_t sz;
+ int i;
+
+ if (!mem_size)
+ return 0;
+
+ rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
+ BNXT_PAGE_SIZE;
+ rmem->page_size = BNXT_PAGE_SIZE;
+ rmem->pg_arr = ctx_pg->ctx_pg_arr;
+ rmem->dma_arr = ctx_pg->ctx_dma_arr;
+ rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
+
+ valid_bits = PTU_PTE_VALID;
+
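+	/*
+	 * A multi-page backing store needs a page table the hardware can
+	 * walk: one 8-byte PTE per data page, reserved and mapped below.
+	 */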
+ if (rmem->nr_pages > 1) {
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_ctx_pg_tbl%s_%x_%d",
+ suffix, idx, bp->eth_dev->data->port_id);
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ if (!mz) {
+ mz = rte_memzone_reserve_aligned(mz_name,
+ rmem->nr_pages * 8,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG,
+ BNXT_PAGE_SIZE);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+
+ rmem->pg_tbl = mz->addr;
+ rmem->pg_tbl_map = mz_phys_addr;
+ rmem->pg_tbl_mz = mz;
+ }
+
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
+		 suffix, idx, bp->eth_dev->data->port_id);
+	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ if (!mz) {
+ mz = rte_memzone_reserve_aligned(mz_name,
+ mem_size,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_1GB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG,
+ BNXT_PAGE_SIZE);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+
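+	/*
+	 * Record the virtual and DMA address of each page; for ring-type
+	 * memory, also tag the next-to-last and last PTEs so the chip can
+	 * detect the end of the ring.
+	 */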
+ for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
+ rmem->pg_arr[i] = ((char *)mz->addr) + sz;
+ rmem->dma_arr[i] = mz_phys_addr + sz;
+
+ if (rmem->nr_pages > 1) {
+ if (i == rmem->nr_pages - 2 &&
+ (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+ valid_bits |= PTU_PTE_NEXT_TO_LAST;
+ else if (i == rmem->nr_pages - 1 &&
+ (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+ valid_bits |= PTU_PTE_LAST;
+
+ rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
+ valid_bits);
+ }
+ }
+
+ rmem->mz = mz;
+ if (rmem->vmem_size)
+ rmem->vmem = (void **)mz->addr;
+ rmem->dma_arr[0] = mz_phys_addr;
+ return 0;
+}
+
+static void bnxt_free_ctx_mem(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
+ return;
+
+ bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+ rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
+ rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
+ rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
+ rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
+ rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
+ rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
+
+ for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) {
+ if (bp->ctx->tqm_mem[i])
+ rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
+ }
+
+ rte_free(bp->ctx);
+ bp->ctx = NULL;
+}
+
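+/* Round x up to the nearest multiple of y. */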
+#define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+
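+/* Linux-kernel-style type-safe min/max/clamp helpers. */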
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1 : __max2; })
+
+#define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max)
+
+int bnxt_alloc_ctx_mem(struct bnxt *bp)
+{
+ struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_info *ctx;
+ uint32_t mem_size, ena, entries;
+ uint32_t entries_sp, min;
+ int i, rc;
+
+ rc = bnxt_hwrm_func_backing_store_qcaps(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
+ return rc;
+ }
+ ctx = bp->ctx;
+ if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
+ return 0;
+
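+	/* Size and allocate one backing store block per context type
+	 * (QP, SRQ, CQ, VNIC, stats) using the limits reported by QCAPS.
+	 */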
+ ctx_pg = &ctx->qp_mem;
+ ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
+ mem_size = ctx->qp_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->srq_mem;
+ ctx_pg->entries = ctx->srq_max_l2_entries;
+ mem_size = ctx->srq_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->cq_mem;
+ ctx_pg->entries = ctx->cq_max_l2_entries;
+ mem_size = ctx->cq_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->vnic_mem;
+ ctx_pg->entries = ctx->vnic_max_vnic_entries +
+ ctx->vnic_max_ring_table_entries;
+ mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->stat_mem;
+ ctx_pg->entries = ctx->stat_max_entries;
+ mem_size = ctx->stat_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
+ if (rc)
+ return rc;
+
+ min = ctx->tqm_min_entries_per_ring;
+
+ entries_sp = ctx->qp_max_l2_entries +
+ ctx->vnic_max_vnic_entries +
+ 2 * ctx->qp_min_qp1_entries + min;
+ entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple);
+
+ entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries;
+ entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
+ entries = clamp_t(uint32_t, entries, min,
+ ctx->tqm_max_entries_per_ring);
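+	/* tqm_mem[0] backs the slow-path ring (entries_sp); the remaining
+	 * rings use the fast-path entry count.
+	 */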
+ for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
+ ctx_pg = ctx->tqm_mem[i];
+ ctx_pg->entries = i ? entries : entries_sp;
+ mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
+ if (rc)
+ return rc;
+ ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
+ }
+
+ ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
+ rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+ if (rc)
+ PMD_DRV_LOG(ERR,
+ "Failed to configure context mem: rc = %d\n", rc);
+ else
+ ctx->flags |= BNXT_CTX_FLAG_INITED;
+
+ return rc;
+}
+
+static int bnxt_alloc_stats_mem(struct bnxt *bp)
+{
+ struct rte_pci_device *pci_dev = bp->pdev;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz = NULL;
+ uint32_t total_alloc_len;
+ rte_iova_t mz_phys_addr;
+
+ if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
+ return 0;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "rx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
+ sizeof(struct rx_port_stats_ext) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+
+ bp->rx_mem_zone = (const void *)mz;
+ bp->hw_rx_port_stats = mz->addr;
+ bp->hw_rx_port_stats_map = mz_phys_addr;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "tx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
+ sizeof(struct tx_port_stats_ext) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name,
+ total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+
+ bp->tx_mem_zone = (const void *)mz;
+ bp->hw_tx_port_stats = mz->addr;
+ bp->hw_tx_port_stats_map = mz_phys_addr;
+ bp->flags |= BNXT_FLAG_PORT_STATS;
+
+ /* Display extended statistics if FW supports it */
+ if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
+ bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
+ !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
+ return 0;
+
+ bp->hw_rx_port_stats_ext = (void *)
+ ((uint8_t *)bp->hw_rx_port_stats +
+ sizeof(struct rx_port_stats));
+ bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
+ sizeof(struct rx_port_stats);
+ bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
+
+ if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
+ bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
+ bp->hw_tx_port_stats_ext = (void *)
+ ((uint8_t *)bp->hw_tx_port_stats +
+ sizeof(struct tx_port_stats));
+ bp->hw_tx_port_stats_ext_map =
+ bp->hw_tx_port_stats_map +
+ sizeof(struct tx_port_stats);
+ bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
+ }
+
+ return 0;
+}
+
+static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ int rc = 0;
+
+ eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
+ RTE_ETHER_ADDR_LEN *
+ bp->max_l2_ctx,
+ 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
+ return -ENOMEM;
+ }
+
+ if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
+ if (BNXT_PF(bp))
+ return -EINVAL;
+
+ /* Generate a random MAC address, if none was assigned by PF */
+ PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
+ bnxt_eth_hw_addr_random(bp->mac_addr);
+ PMD_DRV_LOG(INFO,
+ "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
+ bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
+
+ rc = bnxt_hwrm_set_mac(bp);
+ if (rc)
+ return rc;
+ }
+
+ /* Copy the permanent MAC from the FUNC_QCAPS response */
+	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
+
+ return rc;
+}
+
+static int bnxt_restore_dflt_mac(struct bnxt *bp)
+{
+ int rc = 0;
+
+ /* MAC is already configured in FW */
+ if (BNXT_HAS_DFLT_MAC_SET(bp))
+ return 0;
+
+	/* Restore the previously configured MAC address */
+ rc = bnxt_hwrm_set_mac(bp);
+ if (rc)
+ PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
+
+ return rc;
+}
+
+static void bnxt_config_vf_req_fwd(struct bnxt *bp)
+{
+ if (!BNXT_PF(bp))
+ return;
+
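+/*
+ * Clearing a command's bit in the vf_req_fwd mask lets VF requests for
+ * that command reach the firmware directly instead of being forwarded
+ * to the PF driver for mediation.
+ */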
+#define ALLOW_FUNC(x) \
+ { \
+ uint32_t arg = (x); \
+ bp->pf->vf_req_fwd[((arg) >> 5)] &= \
+ ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
+ }
+
+ /* Forward all requests if firmware is new enough */
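+	/* fw_ver packs (major << 24) | (minor << 16) | (update << 8), so the
+	 * checks below match [20.6.100, 20.7.0) and >= 20.8.0.
+	 */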
+ if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
+ (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
+ ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
+ memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
+ } else {
+ PMD_DRV_LOG(WARNING,
+ "Firmware too old for VF mailbox functionality\n");
+ memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
+ }
+
+ /*
+ * The following are used for driver cleanup. If we disallow these,
+ * VF drivers can't clean up cleanly.
+ */
+ ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
+ ALLOW_FUNC(HWRM_VNIC_FREE);
+ ALLOW_FUNC(HWRM_RING_FREE);
+ ALLOW_FUNC(HWRM_RING_GRP_FREE);
+ ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
+ ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
+ ALLOW_FUNC(HWRM_STAT_CTX_FREE);
+ ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
+ ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
+}
+
+uint16_t
+bnxt_get_svif(uint16_t port_id, bool func_svif,
+ enum bnxt_ulp_intf_type type)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+
+ eth_dev = &rte_eth_devices[port_id];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+ struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ if (type == BNXT_ULP_INTF_TYPE_VF_REP)
+ return vfr->svif;
+
+ eth_dev = vfr->parent_dev;
+ }
+
+ bp = eth_dev->data->dev_private;
+
+ return func_svif ? bp->func_svif : bp->port_svif;
+}
+
+uint16_t
+bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt_vnic_info *vnic;
+ struct bnxt *bp;
+
+ eth_dev = &rte_eth_devices[port];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+ struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ if (type == BNXT_ULP_INTF_TYPE_VF_REP)
+ return vfr->dflt_vnic_id;
+
+ eth_dev = vfr->parent_dev;
+ }
+
+ bp = eth_dev->data->dev_private;
+
+ vnic = BNXT_GET_DEFAULT_VNIC(bp);
+
+ return vnic->fw_vnic_id;
+}
+
+uint16_t
+bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+
+ eth_dev = &rte_eth_devices[port];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+ struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ if (type == BNXT_ULP_INTF_TYPE_VF_REP)
+ return vfr->fw_fid;
+
+ eth_dev = vfr->parent_dev;
+ }
+
+ bp = eth_dev->data->dev_private;
+
+ return bp->fw_fid;
+}
+
+enum bnxt_ulp_intf_type
+bnxt_get_interface_type(uint16_t port)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+
+ eth_dev = &rte_eth_devices[port];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev))
+ return BNXT_ULP_INTF_TYPE_VF_REP;
+
+ bp = eth_dev->data->dev_private;
+ if (BNXT_PF(bp))
+ return BNXT_ULP_INTF_TYPE_PF;
+ else if (BNXT_VF_IS_TRUSTED(bp))
+ return BNXT_ULP_INTF_TYPE_TRUSTED_VF;
+ else if (BNXT_VF(bp))
+ return BNXT_ULP_INTF_TYPE_VF;
+
+ return BNXT_ULP_INTF_TYPE_INVALID;
+}
+
+uint16_t
+bnxt_get_phy_port_id(uint16_t port_id)
+{
+ struct bnxt_vf_representor *vfr;
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+
+ eth_dev = &rte_eth_devices[port_id];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+ vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ eth_dev = vfr->parent_dev;
+ }
+
+ bp = eth_dev->data->dev_private;
+
+ return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id;
+}
+
+uint16_t
+bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+
+ eth_dev = &rte_eth_devices[port_id];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+ struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ if (type == BNXT_ULP_INTF_TYPE_VF_REP)
+ return vfr->fw_fid - 1;
+
+ eth_dev = vfr->parent_dev;
+ }
+
+ bp = eth_dev->data->dev_private;
+
+ return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1;
+}
+
+uint16_t
+bnxt_get_vport(uint16_t port_id)
+{
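+	/* The vport is encoded as a one-hot bitmap of the physical port id. */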
+ return (1 << bnxt_get_phy_port_id(port_id));
+}
+
+static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
+{
+ struct bnxt_error_recovery_info *info = bp->recovery_info;
+
+ if (info) {
+ if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))
+ memset(info, 0, sizeof(*info));
+ return;
+ }
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ return;
+
+ info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
+ sizeof(*info), 0);
+ if (!info)
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+
+ bp->recovery_info = info;
+}
+
+static void bnxt_check_fw_status(struct bnxt *bp)
+{
+ uint32_t fw_status;
+
+ if (!(bp->recovery_info &&
+ (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)))
+ return;
+
+ fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
+ if (fw_status != BNXT_FW_STATUS_HEALTHY)
+ PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n",
+ fw_status);
+}
+
+static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp)
+{
+ struct bnxt_error_recovery_info *info = bp->recovery_info;
+ uint32_t status_loc;
+ uint32_t sig_ver;
+
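+	/* Point GRC window 2 at the hcomm_status structure and read its
+	 * signature/version word.
+	 */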
+ rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
+ sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ BNXT_GRCP_WINDOW_2_BASE +
+ offsetof(struct hcomm_status,
+ sig_ver)));
+ /* If the signature is absent, then FW does not support this feature */
+ if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
+ HCOMM_STATUS_SIGNATURE_VAL)
+ return 0;
+
+ if (!info) {
+ info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
+ sizeof(*info), 0);
+ if (!info)
+ return -ENOMEM;
+ bp->recovery_info = info;
+ } else {
+ memset(info, 0, sizeof(*info));
+ }
+
+ status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ BNXT_GRCP_WINDOW_2_BASE +
+ offsetof(struct hcomm_status,
+ fw_status_loc)));
+
+ /* Only pre-map the FW health status GRC register */
+ if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC)
+ return 0;
+
+ info->status_regs[BNXT_FW_STATUS_REG] = status_loc;
+ info->mapped_status_regs[BNXT_FW_STATUS_REG] =
+ BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK);
+
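+	/* Re-aim GRC window 2 at the page containing the FW status register. */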
+ rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
+
+ bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS;
+
+ return 0;
+}
+
+static int bnxt_init_fw(struct bnxt *bp)
+{
+ uint16_t mtu;
+ int rc = 0;
+
+ bp->fw_cap = 0;
+
+ rc = bnxt_map_hcomm_fw_status_reg(bp);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT);
+ if (rc) {
+ bnxt_check_fw_status(bp);
+ return rc;
+ }
+
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc)
+ return -EIO;
+
+ rc = bnxt_hwrm_vnic_qcaps(bp);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc)
+ return rc;
+
+ /* Get the MAX capabilities for this function.
+ * This function also allocates context memory for TQM rings and
+ * informs the firmware about this allocated backing store memory.
+ */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_func_qcfg(bp, &mtu);
+ if (rc)
+ return rc;
+
+ bnxt_hwrm_port_mac_qcfg(bp);
+
+ bnxt_hwrm_parent_pf_qcfg(bp);
+
+ bnxt_hwrm_port_phy_qcaps(bp);
+
+ rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
+ if (rc)
+ return rc;
+
+ bnxt_alloc_error_recovery_info(bp);
+ /* Get the adapter error recovery support info */
+ rc = bnxt_hwrm_error_recovery_qcfg(bp);
+ if (rc)
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+
+ bnxt_hwrm_port_led_qcaps(bp);
+
+ return 0;
+}
+
+static int
+bnxt_init_locks(struct bnxt *bp)
+{
+ int err;
+
+ err = pthread_mutex_init(&bp->flow_lock, NULL);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
+ return err;
+ }
+
+ err = pthread_mutex_init(&bp->def_cp_lock, NULL);
+ if (err)
+ PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
+ return err;
+}
+
+static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
+{
+ int rc = 0;
+
+ rc = bnxt_init_fw(bp);
+ if (rc)
+ return rc;
+
+ if (!reconfig_dev) {
+ rc = bnxt_setup_mac_addr(bp->eth_dev);
+ if (rc)
+ return rc;
+ } else {
+ rc = bnxt_restore_dflt_mac(bp);
+ if (rc)
+ return rc;
+ }
+
+ bnxt_config_vf_req_fwd(bp);
+
+ rc = bnxt_hwrm_func_driver_register(bp);
+ if (rc) {
+		PMD_DRV_LOG(ERR, "Failed to register driver\n");
+ return -EBUSY;
+ }
+
+ if (BNXT_PF(bp)) {
+ if (bp->pdev->max_vfs) {
+ rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
+ return rc;
+ }
+ } else {
+ rc = bnxt_hwrm_allocate_pf_only(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+					    "Failed to allocate PF resources\n");
+ return rc;
+ }
+ }
+ }
+
+ rc = bnxt_alloc_mem(bp, reconfig_dev);
+ if (rc)
+ return rc;
+
+ rc = bnxt_setup_int(bp);
+ if (rc)
+ return rc;
+
+ rc = bnxt_request_int(bp);
+ if (rc)
+ return rc;
+
+ rc = bnxt_init_ctx_mem(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n");
+ return rc;
+ }
+
+ rc = bnxt_init_locks(bp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int
+bnxt_parse_devarg_truflow(__rte_unused const char *key,
+ const char *value, void *opaque_arg)
+{
+ struct bnxt *bp = opaque_arg;
+ unsigned long truflow;
+ char *end = NULL;
+
+ if (!value || !opaque_arg) {
+ PMD_DRV_LOG(ERR,
+ "Invalid parameter passed to truflow devargs.\n");
+ return -EINVAL;
+ }
+
+ truflow = strtoul(value, &end, 10);
+ if (end == NULL || *end != '\0' ||
+ (truflow == ULONG_MAX && errno == ERANGE)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid parameter passed to truflow devargs.\n");
+ return -EINVAL;
+ }
+
+ if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid value passed to truflow devargs.\n");
+ return -EINVAL;
+ }
+
+ bp->flags |= BNXT_FLAG_TRUFLOW_EN;
+ if (BNXT_TRUFLOW_EN(bp))
+ PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n");
+
+ return 0;
+}
+
+static int
+bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
+ const char *value, void *opaque_arg)
+{
+ struct bnxt *bp = opaque_arg;
+ unsigned long flow_xstat;
+ char *end = NULL;
+
+ if (!value || !opaque_arg) {
+ PMD_DRV_LOG(ERR,
+ "Invalid parameter passed to flow_xstat devarg.\n");
+ return -EINVAL;
+ }
+
+ flow_xstat = strtoul(value, &end, 10);
+ if (end == NULL || *end != '\0' ||
+ (flow_xstat == ULONG_MAX && errno == ERANGE)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid parameter passed to flow_xstat devarg.\n");
+ return -EINVAL;
+ }
+
+ if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid value passed to flow_xstat devarg.\n");
+ return -EINVAL;
+ }
+
+ bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN;
+ if (BNXT_FLOW_XSTATS_EN(bp))
+ PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n");
+
+ return 0;
+}
+
+static int
+bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key,
+ const char *value, void *opaque_arg)
+{
+ struct bnxt *bp = opaque_arg;
+ unsigned long max_num_kflows;
+ char *end = NULL;
+
+ if (!value || !opaque_arg) {
+ PMD_DRV_LOG(ERR,
+ "Invalid parameter passed to max_num_kflows devarg.\n");
+ return -EINVAL;
+ }
+
+ max_num_kflows = strtoul(value, &end, 10);
+ if (end == NULL || *end != '\0' ||
+ (max_num_kflows == ULONG_MAX && errno == ERANGE)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid parameter passed to max_num_kflows devarg.\n");
+ return -EINVAL;
+ }
+
+ if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid value passed to max_num_kflows devarg.\n");
+ return -EINVAL;
+ }
+
+ bp->max_num_kflows = max_num_kflows;
+ if (bp->max_num_kflows)
+		PMD_DRV_LOG(INFO, "max_num_kflows set as %luK.\n",
+ max_num_kflows);
+
+ return 0;
+}
+
+static void
+bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+
+ if (devargs == NULL)
+ return;
+
+ kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args);
+ if (kvlist == NULL)
+ return;
+
+ /*
+ * Handler for "truflow" devarg.
+	 * Invoked e.g. as: "-w 0000:00:0d.0,host-based-truflow=1"
+ */
+ rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
+ bnxt_parse_devarg_truflow, bp);
+
+ /*
+ * Handler for "flow_xstat" devarg.
+	 * Invoked e.g. as: "-w 0000:00:0d.0,flow_xstat=1"
+ */
+ rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
+ bnxt_parse_devarg_flow_xstat, bp);
+
+ /*
+ * Handler for "max_num_kflows" devarg.
+	 * Invoked e.g. as: "-w 0000:00:0d.0,max_num_kflows=32"
+ */
+ rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
+ bnxt_parse_devarg_max_num_kflows, bp);
+
+ rte_kvargs_free(kvlist);
+}
+
+static int bnxt_alloc_switch_domain(struct bnxt *bp)
+{
+ int rc = 0;
+
+ if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
+ rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
+ if (rc)
+ PMD_DRV_LOG(ERR,
+ "Failed to alloc switch domain: %d\n", rc);
+ else
+ PMD_DRV_LOG(INFO,
+ "Switch domain allocated %d\n",
+ bp->switch_domain_id);
+ }
+
+ return rc;
+}
+
+static int
+bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ static int version_printed;