1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2021 Xilinx, Inc.
6 #include <rte_bitmap.h>
11 #include "sfc_sw_stats.h"
/*
 * Classifies a software statistic as Rx- or Tx-side.
 * NOTE(review): this listing elides lines (embedded line numbers skip),
 * so the enum members (presumably SFC_SW_STATS_RX/SFC_SW_STATS_TX, used
 * below) and the closing brace are not visible here.
 */
13 enum sfc_sw_stats_type {
/* Callback type: fetch one SW stat value for ethdev queue 'qid'. */
18 typedef uint64_t sfc_get_sw_stat_val_t(struct sfc_adapter *sa, uint16_t qid);
/*
 * Descriptor of one software statistic: its queue type and value getter.
 * A 'name' field is also referenced later (sw_stat->name) but is elided
 * from this view.
 */
20 struct sfc_sw_stat_descr {
22 enum sfc_sw_stats_type type;
23 sfc_get_sw_stat_val_t *get_val;
/*
 * Getter for the Rx doorbells SW stat: number of Rx doorbell rings on
 * ethdev Rx queue 'qid'. Value is read only while the queue is
 * initialized; the fallback return for an uninitialized queue is elided
 * from this view (presumably 0 — confirm against full source).
 */
26 static sfc_get_sw_stat_val_t sfc_get_sw_stat_val_rx_dbells;
28 sfc_get_sw_stat_val_rx_dbells(struct sfc_adapter *sa, uint16_t qid)
30 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
31 struct sfc_rxq_info *rxq_info;
33 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, qid);
34 if (rxq_info->state & SFC_RXQ_INITIALIZED)
35 return rxq_info->dp->dpq.rx_dbells;
/*
 * Getter for the Tx doorbells SW stat: number of Tx doorbell rings on
 * ethdev Tx queue 'qid'. Mirrors the Rx getter; the uninitialized-queue
 * return path is elided from this view.
 */
39 static sfc_get_sw_stat_val_t sfc_get_sw_stat_val_tx_dbells;
41 sfc_get_sw_stat_val_tx_dbells(struct sfc_adapter *sa, uint16_t qid)
43 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
44 struct sfc_txq_info *txq_info;
46 txq_info = sfc_txq_info_by_ethdev_qid(sas, qid);
47 if (txq_info->state & SFC_TXQ_INITIALIZED)
48 return txq_info->dp->dpq.tx_dbells;
/*
 * Table of all supported software statistics: one Rx and one Tx
 * doorbells counter. Per-entry '.name' initializers and entry braces
 * are elided from this view.
 */
52 const struct sfc_sw_stat_descr sfc_sw_stats_descr[] = {
55 .type = SFC_SW_STATS_RX,
56 .get_val = sfc_get_sw_stat_val_rx_dbells,
60 .type = SFC_SW_STATS_TX,
61 .get_val = sfc_get_sw_stat_val_tx_dbells,
/*
 * Format the xstat name for one SW stat into 'name' (size 'name_size').
 * 'id_off' is the index of this xstat within the stat group: the value
 * handled by the first snprintf produces the group total ("<prefix>_<name>"),
 * otherwise a per-queue name "<prefix>_q<qid>_<name>" with qid = id_off - 1.
 * Returns a negative value on snprintf error or truncation (-EINVAL when
 * the formatted result did not fit). 'prefix' is derived from sw_stat->type
 * in the (partially elided) switch below.
 */
66 sfc_sw_stat_get_name(struct sfc_adapter *sa,
67 const struct sfc_sw_stat_descr *sw_stat, char *name,
68 size_t name_size, unsigned int id_off)
/* Select prefix by stat type; unknown type is logged as an error. */
73 switch (sw_stat->type) {
81 sfc_err(sa, "%s: unknown software statistics type %d",
82 __func__, sw_stat->type);
/* Total (per-group) xstat name: "<prefix>_<name>". */
87 ret = snprintf(name, name_size, "%s_%s", prefix,
/* ret >= name_size means the name was truncated. */
89 if (ret < 0 || ret >= (int)name_size) {
90 sfc_err(sa, "%s: failed to fill xstat name %s_%s, err %d",
91 __func__, prefix, sw_stat->name, ret);
92 return ret > 0 ? -EINVAL : ret;
/* Per-queue xstat: id_off is 1-based relative to the group total. */
95 uint16_t qid = id_off - 1;
96 ret = snprintf(name, name_size, "%s_q%u_%s", prefix, qid,
98 if (ret < 0 || ret >= (int)name_size) {
99 sfc_err(sa, "%s: failed to fill xstat name %s_q%u_%s, err %d",
100 __func__, prefix, qid, sw_stat->name, ret);
101 return ret > 0 ? -EINVAL : ret;
/*
 * Number of ethdev queues the given SW stat applies to: the configured
 * Rx or Tx queue count depending on the stat type. The default/fallback
 * return after the error log is elided from this view (presumably 0).
 */
109 sfc_sw_stat_get_queue_count(struct sfc_adapter *sa,
110 const struct sfc_sw_stat_descr *sw_stat)
112 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
114 switch (sw_stat->type) {
115 case SFC_SW_STATS_RX:
116 return sas->ethdev_rxq_count;
117 case SFC_SW_STATS_TX:
118 return sas->ethdev_txq_count;
120 sfc_err(sa, "%s: unknown software statistics type %d",
121 __func__, sw_stat->type);
/*
 * Number of xstats exposed for one SW stat over 'nb_queues' queues:
 * one aggregate (total) xstat plus one xstat per queue; zero when
 * there are no queues.
 */
127 sfc_sw_xstat_per_queue_get_count(unsigned int nb_queues)
129 /* Take into account the total xstat of all queues */
130 return nb_queues > 0 ? 1 + nb_queues : 0;
/*
 * Total number of xstats contributed by one SW stat descriptor:
 * queue count for its type expanded to "total + per-queue" entries.
 */
134 sfc_sw_xstat_get_nb_supported(struct sfc_adapter *sa,
135 const struct sfc_sw_stat_descr *sw_stat)
137 unsigned int nb_queues;
139 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
140 return sfc_sw_xstat_per_queue_get_count(nb_queues);
/*
 * Fill xstat names for one SW stat group into 'xstats_names'.
 * '*nb_supported' counts every xstat this stat exposes (advanced
 * unconditionally); '*nb_written' counts names actually stored, bounded
 * by 'xstats_names_sz'. 'id_base' anchors the group so the id offset
 * passed to sfc_sw_stat_get_name() is 0 for the total and 1..nb_queues
 * for per-queue entries. Error handling / increments between the calls
 * are elided from this view.
 */
144 sfc_sw_stat_get_names(struct sfc_adapter *sa,
145 const struct sfc_sw_stat_descr *sw_stat,
146 struct rte_eth_xstat_name *xstats_names,
147 unsigned int xstats_names_sz,
148 unsigned int *nb_written,
149 unsigned int *nb_supported)
151 const size_t name_size = sizeof(xstats_names[0].name);
152 unsigned int id_base = *nb_supported;
153 unsigned int nb_queues;
157 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
/* Account for this group's xstats even if there is no room for names. */
160 *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);
163 * The order of each software xstat type is the total xstat
164 * followed by per-queue xstats.
/* Group total name first (offset 0 relative to id_base). */
166 if (*nb_written < xstats_names_sz) {
167 rc = sfc_sw_stat_get_name(sa, sw_stat,
168 xstats_names[*nb_written].name,
169 name_size, *nb_written - id_base);
/* Then one name per queue, while the caller's buffer has room. */
175 for (qid = 0; qid < nb_queues; ++qid) {
176 if (*nb_written < xstats_names_sz) {
177 rc = sfc_sw_stat_get_name(sa, sw_stat,
178 xstats_names[*nb_written].name,
179 name_size, *nb_written - id_base);
/*
 * Fill names only for the requested xstat ids ('ids', 'size' entries)
 * that fall inside this SW stat group's id range
 * [id_base, id_base + nb_queues]. id == id_base selects the group total;
 * larger ids select per-queue names. '*nb_supported' is advanced by the
 * group's full xstat count regardless of which ids were requested.
 */
190 sfc_sw_xstat_get_names_by_id(struct sfc_adapter *sa,
191 const struct sfc_sw_stat_descr *sw_stat,
193 struct rte_eth_xstat_name *xstats_names,
195 unsigned int *nb_supported)
197 const size_t name_size = sizeof(xstats_names[0].name);
198 unsigned int id_base = *nb_supported;
199 unsigned int nb_queues;
203 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
206 *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);
209 * The order of each software xstat type is the total xstat
210 * followed by per-queue xstats.
/* Range check is inclusive: id_base + nb_queues is the last queue entry. */
212 for (i = 0; i < size; i++) {
213 if (id_base <= ids[i] && ids[i] <= id_base + nb_queues) {
214 rc = sfc_sw_stat_get_name(sa, sw_stat,
215 xstats_names[i].name,
216 name_size, ids[i] - id_base);
/*
 * Fill values for one SW stat group into 'xstats': the aggregate total
 * first, then one entry per queue. The total slot is remembered via
 * 'total_xstat' and accumulated while iterating the queues, but only
 * when it actually fit into the caller's buffer (count_total_value).
 * '*nb_supported' is advanced by the group's full count; '*nb_written'
 * (incremented in elided lines) tracks entries actually stored, bounded
 * by 'xstats_size'.
 */
226 sfc_sw_xstat_get_values(struct sfc_adapter *sa,
227 const struct sfc_sw_stat_descr *sw_stat,
228 struct rte_eth_xstat *xstats,
229 unsigned int xstats_size,
230 unsigned int *nb_written,
231 unsigned int *nb_supported)
235 struct rte_eth_xstat *total_xstat;
236 bool count_total_value = false;
237 unsigned int nb_queues;
239 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
242 *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);
245 * The order of each software xstat type is the total xstat
246 * followed by per-queue xstats.
/* Reserve and zero the total slot if there is room for it. */
248 if (*nb_written < xstats_size) {
249 count_total_value = true;
250 total_xstat = &xstats[*nb_written];
251 xstats[*nb_written].id = *nb_written;
252 xstats[*nb_written].value = 0;
/* Per-queue values; each also feeds the running total. */
256 for (qid = 0; qid < nb_queues; ++qid) {
257 value = sw_stat->get_val(sa, qid);
259 if (*nb_written < xstats_size) {
260 xstats[*nb_written].id = *nb_written;
261 xstats[*nb_written].value = value;
265 if (count_total_value)
266 total_xstat->value += value;
/*
 * Fill values only for requested xstat ids within this SW stat group.
 * When per-queue ids are requested, each queried queue is marked in the
 * shared bitmap so that, if the group total (id == id_base) was also
 * requested, queues already read are not fetched a second time: the
 * total is composed of the cached per-queue values (total_value) plus
 * fresh reads of the queues not covered by the request. The bitmap and
 * its spinlock live in sa->sw_stats and are serialized here.
 * NOTE(review): 'total_value_idx' is assigned in elided lines
 * (presumably the index i where ids[i] == id_base).
 */
271 sfc_sw_xstat_get_values_by_id(struct sfc_adapter *sa,
272 const struct sfc_sw_stat_descr *sw_stat,
275 unsigned int ids_size,
276 unsigned int *nb_supported)
278 rte_spinlock_t *bmp_lock = &sa->sw_stats.queues_bitmap_lock;
279 struct rte_bitmap *bmp = sa->sw_stats.queues_bitmap;
280 unsigned int id_base = *nb_supported;
281 bool count_total_value = false;
282 unsigned int total_value_idx;
283 uint64_t total_value = 0;
285 unsigned int nb_queues;
/* Bitmap is shared adapter state: lock and start from a clean slate. */
288 rte_spinlock_lock(bmp_lock);
289 rte_bitmap_reset(bmp);
291 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
294 *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);
297 * The order of each software xstat type is the total xstat
298 * followed by per-queue xstats.
300 for (i = 0; i < ids_size; i++) {
301 if (id_base <= ids[i] && ids[i] <= (id_base + nb_queues)) {
302 if (ids[i] == id_base) { /* Accumulative value */
303 count_total_value = true;
/* Per-queue id: fetch, cache into the running total, mark as read. */
307 qid = ids[i] - id_base - 1;
308 values[i] = sw_stat->get_val(sa, qid);
309 total_value += values[i];
311 rte_bitmap_set(bmp, qid);
/* Compose the total: fresh reads for unmarked queues + cached sum. */
315 if (count_total_value) {
316 values[total_value_idx] = 0;
317 for (qid = 0; qid < nb_queues; ++qid) {
318 if (rte_bitmap_get(bmp, qid) != 0)
320 values[total_value_idx] += sw_stat->get_val(sa, qid);
322 values[total_value_idx] += total_value;
326 rte_spinlock_unlock(bmp_lock);
/*
 * Total number of SW xstats for the adapter, as cached at configure
 * time. Caller must hold the adapter lock.
 */
330 sfc_sw_xstats_get_nb_supported(struct sfc_adapter *sa)
332 SFC_ASSERT(sfc_adapter_is_locked(sa));
333 return sa->sw_stats.xstats_count;
/*
 * Collect values of all SW xstats into 'xstats' under the adapter lock.
 * Each stat descriptor contributes its group of entries; afterwards the
 * per-xstat reset baselines are subtracted so values are relative to the
 * last xstats reset. 'sw_xstats_offset' records where SW xstats start
 * within the caller's combined xstat array.
 */
337 sfc_sw_xstats_get_vals(struct sfc_adapter *sa,
338 struct rte_eth_xstat *xstats,
339 unsigned int xstats_count,
340 unsigned int *nb_written,
341 unsigned int *nb_supported)
343 uint64_t *reset_vals = sa->sw_stats.reset_vals;
344 unsigned int sw_xstats_offset;
347 sfc_adapter_lock(sa);
349 sw_xstats_offset = *nb_supported;
351 for (i = 0; i < RTE_DIM(sfc_sw_stats_descr); i++) {
352 sfc_sw_xstat_get_values(sa, &sfc_sw_stats_descr[i], xstats,
353 xstats_count, nb_written, nb_supported);
/* Report values relative to the last reset baseline. */
356 for (i = sw_xstats_offset; i < *nb_written; i++)
357 xstats[i].value -= reset_vals[i - sw_xstats_offset];
359 sfc_adapter_unlock(sa);
/*
 * Collect names of all SW xstats under the adapter lock, iterating over
 * every stat descriptor. The early unlock at the first unlock site is
 * the error path for a failed sfc_sw_stat_get_names() call (the
 * intervening error check and return are elided from this view); the
 * final unlock is the success path.
 */
363 sfc_sw_xstats_get_names(struct sfc_adapter *sa,
364 struct rte_eth_xstat_name *xstats_names,
365 unsigned int xstats_count,
366 unsigned int *nb_written,
367 unsigned int *nb_supported)
372 sfc_adapter_lock(sa);
374 for (i = 0; i < RTE_DIM(sfc_sw_stats_descr); i++) {
375 ret = sfc_sw_stat_get_names(sa, &sfc_sw_stats_descr[i],
376 xstats_names, xstats_count,
377 nb_written, nb_supported);
379 sfc_adapter_unlock(sa);
384 sfc_adapter_unlock(sa);
/*
 * Collect values for the requested xstat ids ('ids'/'values', 'n'
 * entries — parameter lines partially elided) under the adapter lock,
 * then subtract reset baselines for every requested id that falls in
 * the SW xstats id range [sw_xstats_offset, *nb_supported).
 */
390 sfc_sw_xstats_get_vals_by_id(struct sfc_adapter *sa,
394 unsigned int *nb_supported)
396 uint64_t *reset_vals = sa->sw_stats.reset_vals;
397 unsigned int sw_xstats_offset;
400 sfc_adapter_lock(sa);
402 sw_xstats_offset = *nb_supported;
404 for (i = 0; i < RTE_DIM(sfc_sw_stats_descr); i++) {
405 sfc_sw_xstat_get_values_by_id(sa, &sfc_sw_stats_descr[i], ids,
406 values, n, nb_supported);
/* Apply reset baselines only to ids belonging to SW xstats. */
409 for (i = 0; i < n; i++) {
410 if (sw_xstats_offset <= ids[i] && ids[i] < *nb_supported)
411 values[i] -= reset_vals[ids[i] - sw_xstats_offset];
414 sfc_adapter_unlock(sa);
/*
 * Collect names for the requested xstat ids under the adapter lock.
 * As in sfc_sw_xstats_get_names(), the first unlock is the error path
 * for a failed per-descriptor call (error check elided from this view);
 * the final unlock is the success path.
 */
418 sfc_sw_xstats_get_names_by_id(struct sfc_adapter *sa,
420 struct rte_eth_xstat_name *xstats_names,
422 unsigned int *nb_supported)
427 sfc_adapter_lock(sa);
429 for (i = 0; i < RTE_DIM(sfc_sw_stats_descr); i++) {
430 ret = sfc_sw_xstat_get_names_by_id(sa, &sfc_sw_stats_descr[i],
431 ids, xstats_names, size,
434 sfc_adapter_unlock(sa);
440 sfc_adapter_unlock(sa);
/*
 * Snapshot current values of one SW stat group into 'reset_vals' so
 * subsequent reads can report deltas. The first slot is the group
 * total (sum of all per-queue snapshots); per-queue snapshots follow.
 * NOTE(review): an elided line presumably advances 'reset_vals' past
 * the total slot before the per-queue loop — confirm against full
 * source. Caller must hold the adapter lock.
 */
446 sfc_sw_xstat_reset(struct sfc_adapter *sa,
447 const struct sfc_sw_stat_descr *sw_stat,
448 uint64_t *reset_vals)
450 unsigned int nb_queues;
452 uint64_t *total_xstat_reset;
454 SFC_ASSERT(sfc_adapter_is_locked(sa));
456 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
461 * The order of each software xstat type is the total xstat
462 * followed by per-queue xstats.
464 total_xstat_reset = reset_vals;
465 *total_xstat_reset = 0;
468 for (qid = 0; qid < nb_queues; ++qid) {
469 reset_vals[qid] = sw_stat->get_val(sa, qid);
470 *total_xstat_reset += reset_vals[qid];
/*
 * Reset all SW xstats: snapshot each descriptor's group into its slice
 * of the reset_vals array, advancing by the group's xstat count.
 * Caller must hold the adapter lock.
 */
475 sfc_sw_xstats_reset(struct sfc_adapter *sa)
477 uint64_t *reset_vals = sa->sw_stats.reset_vals;
478 const struct sfc_sw_stat_descr *sw_stat;
481 SFC_ASSERT(sfc_adapter_is_locked(sa));
483 for (i = 0; i < RTE_DIM(sfc_sw_stats_descr); i++) {
484 sw_stat = &sfc_sw_stats_descr[i];
485 sfc_sw_xstat_reset(sa, sw_stat, reset_vals);
486 reset_vals += sfc_sw_xstat_get_nb_supported(sa, sw_stat);
/*
 * (Re)configure SW xstats for the current queue configuration: compute
 * the total xstat count, cache it, and size the reset-baseline array to
 * match, zeroing it. NOTE(review): rte_realloc() result overwrites
 * *reset_vals directly; on failure the old allocation pointer is lost —
 * the elided error-path lines presumably return -ENOMEM, but the old
 * buffer appears unrecoverable at that point. Worth confirming against
 * full source.
 */
491 sfc_sw_xstats_configure(struct sfc_adapter *sa)
493 uint64_t **reset_vals = &sa->sw_stats.reset_vals;
494 size_t nb_supported = 0;
497 for (i = 0; i < RTE_DIM(sfc_sw_stats_descr); i++)
498 nb_supported += sfc_sw_xstat_get_nb_supported(sa,
499 &sfc_sw_stats_descr[i]);
500 sa->sw_stats.xstats_count = nb_supported;
502 *reset_vals = rte_realloc(*reset_vals,
503 nb_supported * sizeof(**reset_vals), 0);
504 if (*reset_vals == NULL)
/* Fresh baselines: start counting deltas from zero. */
507 memset(*reset_vals, 0, nb_supported * sizeof(**reset_vals));
/*
 * Free the queues bitmap and its backing memory (both allocated in
 * sfc_sw_xstats_alloc_queues_bitmap()).
 */
513 sfc_sw_xstats_free_queues_bitmap(struct sfc_adapter *sa)
515 rte_bitmap_free(sa->sw_stats.queues_bitmap);
516 rte_free(sa->sw_stats.queues_bitmap_mem);
/*
 * Allocate the bitmap (sized for RTE_MAX_QUEUES_PER_PORT bits) used by
 * sfc_sw_xstat_get_values_by_id() to track already-read queues, plus
 * its spinlock. Backing memory comes from rte_calloc_socket(); on any
 * failure the partially allocated resources are released via
 * sfc_sw_xstats_free_queues_bitmap() (error-path control flow partially
 * elided from this view).
 */
520 sfc_sw_xstats_alloc_queues_bitmap(struct sfc_adapter *sa)
522 struct rte_bitmap **queues_bitmap = &sa->sw_stats.queues_bitmap;
523 void **queues_bitmap_mem = &sa->sw_stats.queues_bitmap_mem;
527 bmp_size = rte_bitmap_get_memory_footprint(RTE_MAX_QUEUES_PER_PORT);
/* Start from NULL so the cleanup path can free unconditionally. */
528 *queues_bitmap_mem = NULL;
529 *queues_bitmap = NULL;
531 *queues_bitmap_mem = rte_calloc_socket("bitmap_mem", bmp_size, 1, 0,
533 if (*queues_bitmap_mem == NULL)
536 *queues_bitmap = rte_bitmap_init(RTE_MAX_QUEUES_PER_PORT,
537 *queues_bitmap_mem, bmp_size);
538 if (*queues_bitmap == NULL) {
543 rte_spinlock_init(&sa->sw_stats.queues_bitmap_lock);
/* Error path: release whatever was allocated above. */
547 sfc_sw_xstats_free_queues_bitmap(sa);
/*
 * One-time SW xstats init: clear cached state and allocate the shared
 * queues bitmap. Returns the bitmap allocation status.
 */
552 sfc_sw_xstats_init(struct sfc_adapter *sa)
554 sa->sw_stats.xstats_count = 0;
555 sa->sw_stats.reset_vals = NULL;
557 return sfc_sw_xstats_alloc_queues_bitmap(sa);
/*
 * Teardown counterpart of sfc_sw_xstats_init()/configure(): free the
 * bitmap and reset baselines, and clear the cached pointers/counters
 * so stale state cannot be reused.
 */
561 sfc_sw_xstats_close(struct sfc_adapter *sa)
563 sfc_sw_xstats_free_queues_bitmap(sa);
564 rte_free(sa->sw_stats.reset_vals);
565 sa->sw_stats.reset_vals = NULL;
566 sa->sw_stats.xstats_count = 0;