bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
- bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+ bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
+ bp->max_l2_ctx =
+ rte_le_to_cpu_16(resp->max_l2_ctxs) + bp->max_rx_em_flows;
/* TODO: For now, do not support VMDq/RFS on VFs. */
if (BNXT_PF(bp)) {
if (bp->pf.max_vfs)
}
}
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
+ bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
+
HWRM_UNLOCK();
return rc;
bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
- bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ /* func_resource_qcaps does not return max_rx_em_flows.
+ * So use the value provided by func_qcaps.
+ */
+ bp->max_l2_ctx =
+ rte_le_to_cpu_16(resp->max_l2_ctxs) +
+ bp->max_rx_em_flows;
bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
}
cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
} else {
cpr = bp->rx_queues[i]->cp_ring;
- bp->grp_info[i].fw_stats_ctx = -1;
+ if (BNXT_HAS_RING_GRPS(bp))
+ bp->grp_info[i].fw_stats_ctx = -1;
}
if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
sizeof(*cpr->cp_desc_ring));
cpr->cp_raw_cons = 0;
+ cpr->valid = 0;
}
void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
bnxt_hwrm_ring_free(bp, ring,
HWRM_RING_FREE_INPUT_RING_TYPE_RX);
ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
+ if (BNXT_HAS_RING_GRPS(bp))
+ bp->grp_info[queue_index].rx_fw_ring_id =
+ INVALID_HW_RING_ID;
memset(rxr->rx_desc_ring, 0,
rxr->rx_ring_struct->ring_size *
sizeof(*rxr->rx_desc_ring));
rxr->ag_ring_struct->ring_size *
sizeof(*rxr->ag_buf_ring));
rxr->ag_prod = 0;
- bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
+ if (BNXT_HAS_RING_GRPS(bp))
+ bp->grp_info[queue_index].ag_fw_ring_id =
+ INVALID_HW_RING_ID;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr);
bnxt_free_nq_ring(bp, rxq->nq_ring);
}
- bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
+ if (BNXT_HAS_RING_GRPS(bp))
+ bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
}
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
req.port_id = rte_cpu_to_le_16(pf->port_id);
if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
req.tx_stat_host_addr =
- rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
+ rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
req.tx_stat_size =
rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
}
if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
req.rx_stat_host_addr =
- rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
+ rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
req.rx_stat_size =
rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
}
return rc;
}
+
+/* Program this function's default MAC address into the firmware via the
+ * HWRM_FUNC_VF_CFG command.  Applies to VFs only: on a PF this is a no-op
+ * that returns 0.  On success the driver's cached default MAC
+ * (bp->dflt_mac_addr) is updated to match bp->mac_addr.
+ * Returns 0 on success, otherwise the HWRM error code (rc may also be
+ * overridden by HWRM_CHECK_RESULT on a firmware error — see macro).
+ */
+int bnxt_hwrm_set_mac(struct bnxt *bp)
+{
+	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_vf_cfg_input req = {0};
+	int rc = 0;
+
+	/* Only a VF needs to ask the firmware/PF to set its MAC. */
+	if (!BNXT_VF(bp))
+		return 0;
+
+	HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
+
+	/* Only the default-MAC field of the request is valid. */
+	req.enables =
+	    rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+	memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+	HWRM_CHECK_RESULT();
+
+	/* Firmware accepted the MAC; refresh the cached default before
+	 * releasing the HWRM lock.
+	 */
+	memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
+	HWRM_UNLOCK();
+
+	return rc;
+}