struct bnxt_ctx_mem_buf_info tx_fc_out_tbl;
};
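+/* Per-ring HW statistics, as reported by the STAT_CTX_QUERY HWRM command */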
+struct bnxt_ring_stats {
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of packets discarded in transmit path */
+ uint64_t tx_discard_pkts;
+ /* Number of packets in transmit path with errors */
+ uint64_t tx_error_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of packets discarded in receive path */
+ uint64_t rx_discard_pkts;
+ /* Number of packets in receive path with errors */
+ uint64_t rx_error_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of aggregated unicast packets */
+ uint64_t rx_agg_pkts;
+ /* Number of aggregated unicast bytes */
+ uint64_t rx_agg_bytes;
+ /* Number of aggregation events */
+ uint64_t rx_agg_events;
+ /* Number of aborted aggregations */
+ uint64_t rx_agg_aborts;
+};
+
struct bnxt {
void *bar0;
struct bnxt_flow_stat_info *flow_stat;
uint16_t max_num_kflows;
uint16_t tx_cfa_action;
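+ /* Per-ring stats from the previous query; used to mask transient zero readings from HW */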
+ struct bnxt_ring_stats *prev_rx_ring_stats;
+ struct bnxt_ring_stats *prev_tx_ring_stats;
};
static
return rc;
}
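+/* Free the cached previous-iteration ring stats and clear the pointers */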
+static void bnxt_free_prev_ring_stats(struct bnxt *bp)
+{
+ rte_free(bp->prev_rx_ring_stats);
+ rte_free(bp->prev_tx_ring_stats);
+
+ bp->prev_rx_ring_stats = NULL;
+ bp->prev_tx_ring_stats = NULL;
+}
+
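+/* Allocate zeroed per-ring stats snapshots for all Rx and Tx completion rings */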
+static int bnxt_alloc_prev_ring_stats(struct bnxt *bp)
+{
+ bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats",
+ sizeof(struct bnxt_ring_stats) *
+ bp->rx_cp_nr_rings,
+ 0);
+ if (bp->prev_rx_ring_stats == NULL)
+ return -ENOMEM;
+
+ bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats",
+ sizeof(struct bnxt_ring_stats) *
+ bp->tx_cp_nr_rings,
+ 0);
+ if (bp->prev_tx_ring_stats == NULL)
+ goto error;
+
+ return 0;
+
+error:
+ bnxt_free_prev_ring_stats(bp);
+ return -ENOMEM;
+}
+
static int bnxt_start_nic(struct bnxt *bp)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
bnxt_shutdown_nic(bp);
bnxt_hwrm_if_change(bp, false);
+ bnxt_free_prev_ring_stats(bp);
rte_free(bp->mark_table);
bp->mark_table = NULL;
if (rc)
goto error;
+ rc = bnxt_alloc_prev_ring_stats(bp);
+ if (rc)
+ goto error;
+
eth_dev->data->dev_started = 1;
bnxt_link_update_op(eth_dev, 1);
return rc;
}
-int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
- struct rte_eth_stats *stats, uint8_t rx)
+static void bnxt_update_prev_stat(uint64_t *cntr, uint64_t *prev_cntr)
+{
+ /* If HW returned zero for this counter in the current iteration
+ * while a previous non-zero value is cached, report the cached
+ * value instead; otherwise, cache the current value for the
+ * next iteration.
+ */
+ if (*prev_cntr && *cntr == 0)
+ *cntr = *prev_cntr;
+ else
+ *prev_cntr = *cntr;
+}
+
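+/* Query the HW stats context for one ring and fill ring_stats,
+ * smoothing transient zero readings with previously cached values.
+ */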
+int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx,
+ struct bnxt_ring_stats *ring_stats, bool rx)
{
int rc = 0;
struct hwrm_stat_ctx_query_input req = {.req_type = 0};
HWRM_CHECK_RESULT();
if (rx) {
- stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
- stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
- stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
- stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
- stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
- stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
- stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_discard_pkts);
- stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_error_pkts);
+ struct bnxt_ring_stats *prev_stats = &bp->prev_rx_ring_stats[idx];
+
+ ring_stats->rx_ucast_pkts = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_ucast_pkts,
+ &prev_stats->rx_ucast_pkts);
+
+ ring_stats->rx_mcast_pkts = rte_le_to_cpu_64(resp->rx_mcast_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_mcast_pkts,
+ &prev_stats->rx_mcast_pkts);
+
+ ring_stats->rx_bcast_pkts = rte_le_to_cpu_64(resp->rx_bcast_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_bcast_pkts,
+ &prev_stats->rx_bcast_pkts);
+
+ ring_stats->rx_ucast_bytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_ucast_bytes,
+ &prev_stats->rx_ucast_bytes);
+
+ ring_stats->rx_mcast_bytes = rte_le_to_cpu_64(resp->rx_mcast_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_mcast_bytes,
+ &prev_stats->rx_mcast_bytes);
+
+ ring_stats->rx_bcast_bytes = rte_le_to_cpu_64(resp->rx_bcast_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_bcast_bytes,
+ &prev_stats->rx_bcast_bytes);
+
+ ring_stats->rx_discard_pkts = rte_le_to_cpu_64(resp->rx_discard_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_discard_pkts,
+ &prev_stats->rx_discard_pkts);
+
+ ring_stats->rx_error_pkts = rte_le_to_cpu_64(resp->rx_error_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_error_pkts,
+ &prev_stats->rx_error_pkts);
+
+ ring_stats->rx_agg_pkts = rte_le_to_cpu_64(resp->rx_agg_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_agg_pkts,
+ &prev_stats->rx_agg_pkts);
+
+ ring_stats->rx_agg_bytes = rte_le_to_cpu_64(resp->rx_agg_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_agg_bytes,
+ &prev_stats->rx_agg_bytes);
+
+ ring_stats->rx_agg_events = rte_le_to_cpu_64(resp->rx_agg_events);
+ bnxt_update_prev_stat(&ring_stats->rx_agg_events,
+ &prev_stats->rx_agg_events);
+
+ ring_stats->rx_agg_aborts = rte_le_to_cpu_64(resp->rx_agg_aborts);
+ bnxt_update_prev_stat(&ring_stats->rx_agg_aborts,
+ &prev_stats->rx_agg_aborts);
} else {
- stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
- stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
- stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
- stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
- stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
- stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+ struct bnxt_ring_stats *prev_stats = &bp->prev_tx_ring_stats[idx];
+
+ ring_stats->tx_ucast_pkts = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_ucast_pkts,
+ &prev_stats->tx_ucast_pkts);
+
+ ring_stats->tx_mcast_pkts = rte_le_to_cpu_64(resp->tx_mcast_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_mcast_pkts,
+ &prev_stats->tx_mcast_pkts);
+
+ ring_stats->tx_bcast_pkts = rte_le_to_cpu_64(resp->tx_bcast_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_bcast_pkts,
+ &prev_stats->tx_bcast_pkts);
+
+ ring_stats->tx_ucast_bytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+ bnxt_update_prev_stat(&ring_stats->tx_ucast_bytes,
+ &prev_stats->tx_ucast_bytes);
+
+ ring_stats->tx_mcast_bytes = rte_le_to_cpu_64(resp->tx_mcast_bytes);
+ bnxt_update_prev_stat(&ring_stats->tx_mcast_bytes,
+ &prev_stats->tx_mcast_bytes);
+
+ ring_stats->tx_bcast_bytes = rte_le_to_cpu_64(resp->tx_bcast_bytes);
+ bnxt_update_prev_stat(&ring_stats->tx_bcast_bytes,
+ &prev_stats->tx_bcast_bytes);
+
+ ring_stats->tx_discard_pkts = rte_le_to_cpu_64(resp->tx_discard_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_discard_pkts,
+ &prev_stats->tx_discard_pkts);
}
HWRM_UNLOCK();
int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx);
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
-int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
- struct rte_eth_stats *stats, uint8_t rx);
-
int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout);
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
uint32_t echo_req_data2);
int bnxt_hwrm_poll_ver_get(struct bnxt *bp);
int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int queue_index);
+int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx,
+ struct bnxt_ring_stats *stats, bool rx);
#endif
}
}
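+/* Accumulate one ring's counters into the rte_eth_stats totals
+ * and the per-queue entries for queue index i.
+ */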
+static void bnxt_fill_rte_eth_stats(struct rte_eth_stats *stats,
+ struct bnxt_ring_stats *ring_stats,
+ unsigned int i, bool rx)
+{
+ if (rx) {
+ stats->q_ipackets[i] = ring_stats->rx_ucast_pkts;
+ stats->q_ipackets[i] += ring_stats->rx_mcast_pkts;
+ stats->q_ipackets[i] += ring_stats->rx_bcast_pkts;
+
+ stats->ipackets += stats->q_ipackets[i];
+
+ stats->q_ibytes[i] = ring_stats->rx_ucast_bytes;
+ stats->q_ibytes[i] += ring_stats->rx_mcast_bytes;
+ stats->q_ibytes[i] += ring_stats->rx_bcast_bytes;
+
+ stats->ibytes += stats->q_ibytes[i];
+
+ stats->q_errors[i] = ring_stats->rx_discard_pkts;
+ stats->q_errors[i] += ring_stats->rx_error_pkts;
+
+ stats->imissed += ring_stats->rx_discard_pkts;
+ stats->ierrors += ring_stats->rx_error_pkts;
+ } else {
+ stats->q_opackets[i] = ring_stats->tx_ucast_pkts;
+ stats->q_opackets[i] += ring_stats->tx_mcast_pkts;
+ stats->q_opackets[i] += ring_stats->tx_bcast_pkts;
+
+ stats->opackets += stats->q_opackets[i];
+
+ stats->q_obytes[i] = ring_stats->tx_ucast_bytes;
+ stats->q_obytes[i] += ring_stats->tx_mcast_bytes;
+ stats->q_obytes[i] += ring_stats->tx_bcast_bytes;
+
+ stats->obytes += stats->q_obytes[i];
+
+ stats->oerrors += ring_stats->tx_discard_pkts;
+ }
+}
+
int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
- struct rte_eth_stats *bnxt_stats)
+ struct rte_eth_stats *bnxt_stats)
{
int rc = 0;
unsigned int i;
for (i = 0; i < num_q_stats; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_ring_stats ring_stats = {0};
if (!rxq->rx_started)
continue;
- rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
- bnxt_stats, 1);
+
+ rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
+ &ring_stats, true);
if (unlikely(rc))
return rc;
+
+ bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
rte_atomic64_read(&rxq->rx_mbuf_alloc_fail);
}
for (i = 0; i < num_q_stats; i++) {
struct bnxt_tx_queue *txq = bp->tx_queues[i];
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ struct bnxt_ring_stats ring_stats = {0};
if (!txq->tx_started)
continue;
- rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
- bnxt_stats, 0);
+
+ rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
+ &ring_stats, false);
if (unlikely(rc))
return rc;
+
+ bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, false);
}
- rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats, NULL);
return rc;
}
return ret;
}
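+/* Aggregate one ring's counters into the function-wide qstats
+ * consumed by xstats reporting.
+ */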
+static void bnxt_fill_func_qstats(struct hwrm_func_qstats_output *func_qstats,
+ struct bnxt_ring_stats *ring_stats,
+ bool rx)
+{
+ if (rx) {
+ func_qstats->rx_ucast_pkts += ring_stats->rx_ucast_pkts;
+ func_qstats->rx_mcast_pkts += ring_stats->rx_mcast_pkts;
+ func_qstats->rx_bcast_pkts += ring_stats->rx_bcast_pkts;
+
+ func_qstats->rx_ucast_bytes += ring_stats->rx_ucast_bytes;
+ func_qstats->rx_mcast_bytes += ring_stats->rx_mcast_bytes;
+ func_qstats->rx_bcast_bytes += ring_stats->rx_bcast_bytes;
+
+ func_qstats->rx_discard_pkts += ring_stats->rx_discard_pkts;
+ func_qstats->rx_drop_pkts += ring_stats->rx_error_pkts;
+
+ func_qstats->rx_agg_pkts += ring_stats->rx_agg_pkts;
+ func_qstats->rx_agg_bytes += ring_stats->rx_agg_bytes;
+ func_qstats->rx_agg_events += ring_stats->rx_agg_events;
+ func_qstats->rx_agg_aborts += ring_stats->rx_agg_aborts;
+ } else {
+ func_qstats->tx_ucast_pkts += ring_stats->tx_ucast_pkts;
+ func_qstats->tx_mcast_pkts += ring_stats->tx_mcast_pkts;
+ func_qstats->tx_bcast_pkts += ring_stats->tx_bcast_pkts;
+
+ func_qstats->tx_ucast_bytes += ring_stats->tx_ucast_bytes;
+ func_qstats->tx_mcast_bytes += ring_stats->tx_mcast_bytes;
+ func_qstats->tx_bcast_bytes += ring_stats->tx_bcast_bytes;
+
+ func_qstats->tx_drop_pkts += ring_stats->tx_error_pkts;
+ func_qstats->tx_discard_pkts += ring_stats->tx_discard_pkts;
+ }
+}
+
int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
if (n < stat_count || xstats == NULL)
return stat_count;
- bnxt_hwrm_func_qstats(bp, 0xffff, NULL, &func_qstats);
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_ring_stats ring_stats = {0};
+
+ if (!rxq->rx_started)
+ continue;
+
+ rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
+ &ring_stats, true);
+ if (unlikely(rc))
+ return rc;
+
+ bnxt_fill_func_qstats(&func_qstats, &ring_stats, true);
+ }
+
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ struct bnxt_ring_stats ring_stats = {0};
+
+ if (!txq->tx_started)
+ continue;
+
+ rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
+ &ring_stats, false);
+ if (unlikely(rc))
+ return rc;
+
+ bnxt_fill_func_qstats(&func_qstats, &ring_stats, false);
+ }
+
bnxt_hwrm_port_qstats(bp);
bnxt_hwrm_ext_port_qstats(bp);
rx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_rx_ext_stats_strings),
for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) {
xstats[count].id = count;
- xstats[count].value =
- rte_le_to_cpu_64(*(uint64_t *)((char *)&func_qstats +
- bnxt_func_stats_strings[i].offset));
+ xstats[count].value = *(uint64_t *)((char *)&func_qstats +
+ bnxt_func_stats_strings[i].offset);
count++;
}
-
for (i = 0; i < rx_port_stats_ext_cnt; i++) {
uint64_t *rx_stats_ext = (uint64_t *)bp->hw_rx_port_stats_ext;