+
+/* Update the input context memory with the flow counter IDs
+ * of the flows that we are interested in.
+ * Also, update the output tables with the current local values
+ * since that is what will be used by FW to accumulate
+ */
+static void bnxt_update_fc_pre_qstat(uint32_t *in_tbl,
+ uint64_t *out_tbl,
+ struct bnxt_filter_info *filter,
+ uint32_t *ptbl_cnt)
+{
+ uint32_t in_tbl_cnt = *ptbl_cnt;
+
+ in_tbl[in_tbl_cnt] = filter->flow_id;
+ out_tbl[2 * in_tbl_cnt] = filter->hw_stats.packets;
+ out_tbl[2 * in_tbl_cnt + 1] = filter->hw_stats.bytes;
+ in_tbl_cnt++;
+ *ptbl_cnt = in_tbl_cnt;
+}
+
+/* Post issuing counter_qstats cmd, update the driver's local stat
+ * entries with the values DMA-ed by FW in the output table
+ */
+static void bnxt_update_fc_post_qstat(struct bnxt_filter_info *filter,
+ uint64_t *out_tbl,
+ uint32_t out_tbl_idx)
+{
+ filter->hw_stats.packets = out_tbl[2 * out_tbl_idx];
+ filter->hw_stats.bytes = out_tbl[(2 * out_tbl_idx) + 1];
+}
+
+static int bnxt_update_fc_tbl(struct bnxt *bp, uint16_t ctr,
+ struct bnxt_filter_info *en_tbl[],
+ uint16_t in_flow_cnt)
+{
+ uint32_t *in_rx_tbl;
+ uint64_t *out_rx_tbl;
+ uint32_t in_rx_tbl_cnt = 0;
+ uint32_t out_rx_tbl_cnt = 0;
+ int i, rc = 0;
+
+ in_rx_tbl = (uint32_t *)bp->flow_stat->rx_fc_in_tbl.va;
+ out_rx_tbl = (uint64_t *)bp->flow_stat->rx_fc_out_tbl.va;
+
+ for (i = 0; i < in_flow_cnt; i++) {
+ if (!en_tbl[i])
+ continue;
+
+ /* Currently only ingress/Rx flows are supported anyway. */
+ bnxt_update_fc_pre_qstat(in_rx_tbl, out_rx_tbl,
+ en_tbl[i], &in_rx_tbl_cnt);
+ }
+
+ /* Currently only ingress/Rx flows are supported */
+ if (in_rx_tbl_cnt) {
+ rc = bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX, ctr,
+ in_rx_tbl_cnt);
+ if (rc)
+ return rc;
+ }
+
+ for (i = 0; i < in_flow_cnt; i++) {
+ if (!en_tbl[i])
+ continue;
+
+ /* Currently only ingress/Rx flows are supported */
+ bnxt_update_fc_post_qstat(en_tbl[i], out_rx_tbl,
+ out_rx_tbl_cnt);
+ out_rx_tbl_cnt++;
+ }
+
+ return rc;
+}
+
/* Walks through the list which has all the flows
 * requesting for explicit flow counters.
 *
 * Batches up to max_fc filters at a time, issuing a counter query per
 * full batch and one final query for the trailing partial batch.
 * Returns 0 on success; on HWRM failure the periodic flow-counter
 * thread is cancelled (no point retrying every second) and the error
 * code is returned.
 */
int bnxt_flow_stats_req(struct bnxt *bp)
{
	int i;
	int rc = 0;
	struct rte_flow *flow;
	/* Number of filters accumulated in the current batch. */
	uint16_t in_flow_tbl_cnt = 0;
	struct bnxt_vnic_info *vnic = NULL;
	/* NOTE(review): stack VLA sized by flow_stat->max_fc — assumes
	 * max_fc is modest; confirm against the value FW advertises.
	 */
	struct bnxt_filter_info *valid_en_tbl[bp->flow_stat->max_fc];
	uint16_t counter_type = CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC;

	/* Hold the flow lock across the whole walk so flow_list and the
	 * filters it references stay stable while we batch them.
	 */
	bnxt_acquire_flow_lock(bp);
	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		/* Skip VNICs not instantiated in FW. */
		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		if (STAILQ_EMPTY(&vnic->flow_list))
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			if (!flow || !flow->filter)
				continue;

			valid_en_tbl[in_flow_tbl_cnt++] = flow->filter;
			/* Batch full: query now and start a fresh batch. */
			if (in_flow_tbl_cnt >= bp->flow_stat->max_fc) {
				rc = bnxt_update_fc_tbl(bp, counter_type,
							valid_en_tbl,
							in_flow_tbl_cnt);
				if (rc)
					goto err;
				in_flow_tbl_cnt = 0;
				continue;
			}
		}
	}

	/* No trailing partial batch to flush; rc is 0 here. */
	if (!in_flow_tbl_cnt) {
		bnxt_release_flow_lock(bp);
		goto out;
	}

	/* Flush the final (partial) batch. */
	rc = bnxt_update_fc_tbl(bp, counter_type, valid_en_tbl,
				in_flow_tbl_cnt);
	if (!rc) {
		bnxt_release_flow_lock(bp);
		return 0;
	}

err:
	/* If cmd fails once, no need of
	 * invoking again every second
	 */
	bnxt_release_flow_lock(bp);
	bnxt_cancel_fc_thread(bp);
out:
	return rc;
}