1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2021 Xilinx, Inc.
6 #include <rte_bitmap.h>
11 #include "sfc_sw_stats.h"
13 #define SFC_SW_STAT_INVALID UINT64_MAX
15 #define SFC_SW_STATS_GROUP_SIZE_MAX 1U
/*
 * Core SW-stats type declarations.
 * NOTE(review): this listing is sampled -- enum members, the struct's
 * remaining fields (e.g. 'name', 'provide_total' used below) and the
 * closing braces are not visible here.
 */
17 enum sfc_sw_stats_type {
/*
 * Callback fetching per-queue stat values for one ethdev queue.
 * A non-NULL get_val in a descriptor marks the start of a stats group
 * (see the group comment above sfc_sw_stats_descr below).
 */
22 typedef void sfc_get_sw_stat_val_t(struct sfc_adapter *sa, uint16_t qid,
23 uint64_t *values, unsigned int values_count)
/* Descriptor of one software xstat. */
25 struct sfc_sw_stat_descr {
27 enum sfc_sw_stats_type type;
28 sfc_get_sw_stat_val_t *get_val;
/*
 * Get the Rx doorbells count for one ethdev Rx queue.
 * Group size is 1, so values_count must be 1; an uninitialized queue
 * reports 0 since its datapath queue cannot be read.
 */
32 static sfc_get_sw_stat_val_t sfc_get_sw_stat_val_rx_dbells;
34 sfc_get_sw_stat_val_rx_dbells(struct sfc_adapter *sa, uint16_t qid,
35 uint64_t *values, unsigned int values_count)
37 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
38 struct sfc_rxq_info *rxq_info;
/* values_count is only consumed by the assert in debug builds */
40 RTE_SET_USED(values_count);
41 SFC_ASSERT(values_count == 1);
42 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, qid);
43 values[0] = rxq_info->state & SFC_RXQ_INITIALIZED ?
44 rxq_info->dp->dpq.rx_dbells : 0;
/*
 * Get the Tx doorbells count for one ethdev Tx queue.
 * Mirrors sfc_get_sw_stat_val_rx_dbells(): group size 1, and an
 * uninitialized queue reports 0.
 */
47 static sfc_get_sw_stat_val_t sfc_get_sw_stat_val_tx_dbells;
49 sfc_get_sw_stat_val_tx_dbells(struct sfc_adapter *sa, uint16_t qid,
50 uint64_t *values, unsigned int values_count)
52 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
53 struct sfc_txq_info *txq_info;
/* values_count is only consumed by the assert in debug builds */
55 RTE_SET_USED(values_count);
56 SFC_ASSERT(values_count == 1);
57 txq_info = sfc_txq_info_by_ethdev_qid(sas, qid);
58 values[0] = txq_info->state & SFC_TXQ_INITIALIZED ?
59 txq_info->dp->dpq.tx_dbells : 0;
63 * SW stats can be grouped together. When stats are grouped the corresponding
64 * stats values for each queue are obtained during calling one get value
65 * callback. Stats of the same group are contiguous in the structure below.
66 * The start of the group is denoted by stat implementing get value callback.
/*
 * Table of all supported SW stats. NOTE(review): entry fields such as
 * .name are not visible in this sampled listing.
 */
68 const struct sfc_sw_stat_descr sfc_sw_stats_descr[] = {
/* Per-queue Rx doorbells plus an accumulated total over all Rx queues */
71 .type = SFC_SW_STATS_RX,
72 .get_val = sfc_get_sw_stat_val_rx_dbells,
73 .provide_total = true,
/* Per-queue Tx doorbells plus an accumulated total over all Tx queues */
77 .type = SFC_SW_STATS_TX,
78 .get_val = sfc_get_sw_stat_val_tx_dbells,
79 .provide_total = true,
/*
 * Compose the xstat name for one entry of a SW stat into 'name':
 * "<prefix>_<name>" for the total entry (id_off == 0 when the stat
 * provides a total), "<prefix>_q<qid>_<name>" for per-queue entries.
 * Returns 0 on success (not visible here -- TODO confirm) or a negative
 * errno when snprintf fails or truncates.
 * NOTE(review): the per-type 'prefix' assignments inside the switch are
 * not visible in this sampled listing.
 */
84 sfc_sw_stat_get_name(struct sfc_adapter *sa,
85 const struct sfc_sw_stat_descr *sw_stat, char *name,
86 size_t name_size, unsigned int id_off)
91 switch (sw_stat->type) {
/* Unknown type is logged as an error */
99 sfc_err(sa, "%s: unknown software statistics type %d",
100 __func__, sw_stat->type);
/* id_off 0 is the total entry when a total is provided */
104 if (sw_stat->provide_total && id_off == 0) {
105 ret = snprintf(name, name_size, "%s_%s", prefix,
/* ret >= name_size means the name was truncated */
107 if (ret < 0 || ret >= (int)name_size) {
108 sfc_err(sa, "%s: failed to fill xstat name %s_%s, err %d",
109 __func__, prefix, sw_stat->name, ret);
110 return ret > 0 ? -EINVAL : ret;
/* provide_total (boolean used as 0/1) shifts per-queue ids by one */
113 uint16_t qid = id_off - sw_stat->provide_total;
114 ret = snprintf(name, name_size, "%s_q%u_%s", prefix, qid,
116 if (ret < 0 || ret >= (int)name_size) {
117 sfc_err(sa, "%s: failed to fill xstat name %s_q%u_%s, err %d",
118 __func__, prefix, qid, sw_stat->name, ret);
119 return ret > 0 ? -EINVAL : ret;
/*
 * Number of ethdev queues covered by the given SW stat: the adapter's
 * Rx or Tx queue count depending on the stat type. Unknown type is
 * logged; the fallthrough return (not visible here) presumably yields
 * 0 -- TODO confirm.
 */
127 sfc_sw_stat_get_queue_count(struct sfc_adapter *sa,
128 const struct sfc_sw_stat_descr *sw_stat)
130 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
132 switch (sw_stat->type) {
133 case SFC_SW_STATS_RX:
134 return sas->ethdev_rxq_count;
135 case SFC_SW_STATS_TX:
136 return sas->ethdev_txq_count;
138 sfc_err(sa, "%s: unknown software statistics type %d",
139 __func__, sw_stat->type);
/*
 * Number of xstat entries exposed by one SW stat: per-queue entries
 * plus the optional accumulated total (provide_total used as 0/1).
 * A stat with no queues exposes nothing, including no total.
 */
145 sfc_sw_xstat_per_queue_get_count(const struct sfc_sw_stat_descr *sw_stat,
146 unsigned int nb_queues)
148 /* Take into account the total xstat of all queues */
149 return nb_queues > 0 ? sw_stat->provide_total + nb_queues : 0;
/*
 * Total number of xstat entries currently supported by one SW stat,
 * based on the adapter's current queue count for the stat's type.
 */
153 sfc_sw_xstat_get_nb_supported(struct sfc_adapter *sa,
154 const struct sfc_sw_stat_descr *sw_stat)
156 unsigned int nb_queues;
158 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
159 return sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
/*
 * Fill xstats_names with the names of one SW stat's entries, total
 * first then per-queue, stopping when xstats_names_sz is reached.
 * Always advances *nb_supported by the stat's full entry count;
 * *nb_written advances only for names actually stored (increments not
 * visible in this sampled listing -- TODO confirm).
 */
163 sfc_sw_stat_get_names(struct sfc_adapter *sa,
164 const struct sfc_sw_stat_descr *sw_stat,
165 struct rte_eth_xstat_name *xstats_names,
166 unsigned int xstats_names_sz,
167 unsigned int *nb_written,
168 unsigned int *nb_supported)
170 const size_t name_size = sizeof(xstats_names[0].name);
/* ids of this stat's entries start at the current supported count */
171 unsigned int id_base = *nb_supported;
172 unsigned int nb_queues;
176 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
179 *nb_supported += sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
182 * The order of each software xstat type is the total xstat
183 * followed by per-queue xstats.
185 if (*nb_written < xstats_names_sz && sw_stat->provide_total) {
186 rc = sfc_sw_stat_get_name(sa, sw_stat,
187 xstats_names[*nb_written].name,
188 name_size, *nb_written - id_base)
194 for (qid = 0; qid < nb_queues; ++qid) {
195 if (*nb_written < xstats_names_sz) {
196 rc = sfc_sw_stat_get_name(sa, sw_stat,
197 xstats_names[*nb_written].name,
198 name_size, *nb_written - id_base);
/*
 * Fill names only for the requested ids that fall inside this SW
 * stat's id range [id_base, id_end). Always advances *nb_supported by
 * the stat's full entry count regardless of which ids were requested.
 */
209 sfc_sw_xstat_get_names_by_id(struct sfc_adapter *sa,
210 const struct sfc_sw_stat_descr *sw_stat,
212 struct rte_eth_xstat_name *xstats_names,
214 unsigned int *nb_supported)
216 const size_t name_size = sizeof(xstats_names[0].name);
217 unsigned int id_base = *nb_supported;
219 unsigned int nb_queues;
223 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
226 *nb_supported += sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
229 * The order of each software xstat type is the total xstat
230 * followed by per-queue xstats.
232 id_end = id_base + sw_stat->provide_total + nb_queues;
233 for (i = 0; i < size; i++) {
/* Name goes to the same position i as the requested id */
234 if (id_base <= ids[i] && ids[i] < id_end) {
235 rc = sfc_sw_stat_get_name(sa, sw_stat,
236 xstats_names[i].name,
237 name_size, ids[i] - id_base);
/*
 * Return the cached value of SW stat sw_stat_idx for queue qid,
 * computing it on a cache miss. A miss triggers the group's get_val
 * callback once and populates the cache entries of every stat in the
 * group for this queue. The cache must have been invalidated to
 * SFC_SW_STAT_INVALID beforehand (see sfc_sw_stats_clear_cache).
 */
247 sfc_sw_stat_get_val(struct sfc_adapter *sa,
248 unsigned int sw_stat_idx, uint16_t qid)
250 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
251 uint64_t *res = &sw_stats->supp[sw_stat_idx].cache[qid];
252 uint64_t values[SFC_SW_STATS_GROUP_SIZE_MAX];
253 unsigned int group_start_idx;
254 unsigned int group_size;
/* Cache hit: the value was already computed since the last clear */
257 if (*res != SFC_SW_STAT_INVALID)
261 * Search for the group start, i.e. the stat that implements
262 * get value callback.
264 group_start_idx = sw_stat_idx;
265 while (sw_stats->supp[group_start_idx].descr->get_val == NULL)
269 * Calculate number of elements in the group with loop till the next
270 * group start or the list end.
273 for (i = sw_stat_idx + 1; i < sw_stats->supp_count; i++) {
274 if (sw_stats->supp[i].descr->get_val != NULL)
/* Also count group members at or before sw_stat_idx */
278 group_size += sw_stat_idx - group_start_idx;
280 SFC_ASSERT(group_size <= SFC_SW_STATS_GROUP_SIZE_MAX);
/* One callback invocation fills values[] for the whole group */
281 sw_stats->supp[group_start_idx].descr->get_val(sa, qid, values,
283 for (i = group_start_idx; i < (group_start_idx + group_size); i++)
284 sw_stats->supp[i].cache[qid] = values[i - group_start_idx];
/*
 * Write one SW stat's values into xstats[]: the accumulated total
 * entry first (when provided and it fits), then per-queue values.
 * The total is summed over all queues even when some per-queue entries
 * do not fit in xstats_size. *nb_supported always advances by the full
 * entry count.
 */
290 sfc_sw_xstat_get_values(struct sfc_adapter *sa,
291 const struct sfc_sw_stat_descr *sw_stat,
292 unsigned int sw_stat_idx,
293 struct rte_eth_xstat *xstats,
294 unsigned int xstats_size,
295 unsigned int *nb_written,
296 unsigned int *nb_supported)
/* Remembered slot for the total so the queue loop can accumulate into it */
300 struct rte_eth_xstat *total_xstat;
301 bool count_total_value = false;
302 unsigned int nb_queues;
304 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
307 *nb_supported += sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
310 * The order of each software xstat type is the total xstat
311 * followed by per-queue xstats.
313 if (*nb_written < xstats_size && sw_stat->provide_total) {
314 count_total_value = true;
315 total_xstat = &xstats[*nb_written];
316 xstats[*nb_written].id = *nb_written;
317 xstats[*nb_written].value = 0;
321 for (qid = 0; qid < nb_queues; ++qid) {
322 value = sfc_sw_stat_get_val(sa, sw_stat_idx, qid);
324 if (*nb_written < xstats_size) {
325 xstats[*nb_written].id = *nb_written;
326 xstats[*nb_written].value = value;
330 if (count_total_value)
331 total_xstat->value += value;
/*
 * Fill values[] only for requested ids inside this SW stat's range.
 * If the total entry is requested, it must sum over *all* queues, not
 * just the requested ones: queues already read in the first pass are
 * marked in a bitmap, and the second pass fetches the remaining ones.
 * The bitmap is shared adapter state, hence the spinlock around its
 * use. *nb_supported always advances by the full entry count.
 */
336 sfc_sw_xstat_get_values_by_id(struct sfc_adapter *sa,
337 const struct sfc_sw_stat_descr *sw_stat,
338 unsigned int sw_stat_idx,
341 unsigned int ids_size,
342 unsigned int *nb_supported)
344 rte_spinlock_t *bmp_lock = &sa->sw_stats.queues_bitmap_lock;
345 struct rte_bitmap *bmp = sa->sw_stats.queues_bitmap;
346 unsigned int id_base = *nb_supported;
347 unsigned int id_base_q;
349 bool count_total_value = false;
350 unsigned int total_value_idx;
351 uint64_t total_value = 0;
353 unsigned int nb_queues;
/* Protect the shared queues bitmap used to track visited queues */
356 rte_spinlock_lock(bmp_lock);
357 rte_bitmap_reset(bmp);
359 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
362 *nb_supported += sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
365 * The order of each software xstat type is the total xstat
366 * followed by per-queue xstats.
368 id_end = id_base + sw_stat->provide_total + nb_queues;
369 for (i = 0; i < ids_size; i++) {
370 if (id_base <= ids[i] && ids[i] < id_end) {
371 if (sw_stat->provide_total && ids[i] == id_base) {
372 /* Accumulative value */
373 count_total_value = true;
377 id_base_q = id_base + sw_stat->provide_total;
378 qid = ids[i] - id_base_q;
379 values[i] = sfc_sw_stat_get_val(sa, sw_stat_idx, qid);
380 total_value += values[i];
/* Mark this queue as already fetched for the total's second pass */
382 rte_bitmap_set(bmp, qid);
386 if (count_total_value) {
387 values[total_value_idx] = 0;
388 for (qid = 0; qid < nb_queues; ++qid) {
/* Skip queues whose values were fetched in the first pass */
389 if (rte_bitmap_get(bmp, qid) != 0)
391 values[total_value_idx] += sfc_sw_stat_get_val(sa,
/* Add the values gathered during the first pass */
395 values[total_value_idx] += total_value;
399 rte_spinlock_unlock(bmp_lock);
/*
 * Total number of SW xstats (computed at configure time).
 * Caller must hold the adapter lock.
 */
403 sfc_sw_xstats_get_nb_supported(struct sfc_adapter *sa)
405 SFC_ASSERT(sfc_adapter_is_locked(sa));
406 return sa->sw_stats.xstats_count;
/*
 * Invalidate every cached stat value by filling the cache with 0xff
 * bytes, i.e. SFC_SW_STAT_INVALID in each uint64_t slot. The build
 * assert pins the invalid marker to the all-ones pattern so the memset
 * stays correct.
 */
410 sfc_sw_stats_clear_cache(struct sfc_adapter *sa)
412 unsigned int cache_count = sa->sw_stats.cache_count;
413 uint64_t *cache = sa->sw_stats.cache;
415 RTE_BUILD_BUG_ON(UINT64_C(0xffffffffffffffff) != SFC_SW_STAT_INVALID);
416 memset(cache, 0xff, cache_count * sizeof(*cache));
/*
 * Entry point: gather all SW xstat values into xstats[] under the
 * adapter lock. The cache is invalidated first so every value is read
 * fresh, and the per-xstat reset baselines are subtracted at the end
 * so values appear relative to the last xstats reset.
 */
420 sfc_sw_xstats_get_vals(struct sfc_adapter *sa,
421 struct rte_eth_xstat *xstats,
422 unsigned int xstats_count,
423 unsigned int *nb_written,
424 unsigned int *nb_supported)
426 uint64_t *reset_vals = sa->sw_stats.reset_vals;
427 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
428 unsigned int sw_xstats_offset;
431 sfc_adapter_lock(sa);
433 sfc_sw_stats_clear_cache(sa);
/* SW xstats follow whatever ids the caller already accounted for */
435 sw_xstats_offset = *nb_supported;
437 for (i = 0; i < sw_stats->supp_count; i++) {
438 sfc_sw_xstat_get_values(sa, sw_stats->supp[i].descr, i,
439 xstats, xstats_count, nb_written, nb_supported);
/* Report values relative to the last reset */
442 for (i = sw_xstats_offset; i < *nb_written; i++)
443 xstats[i].value -= reset_vals[i - sw_xstats_offset];
445 sfc_adapter_unlock(sa);
/*
 * Entry point: gather all SW xstat names under the adapter lock.
 * A failure from sfc_sw_stat_get_names unlocks and bails out (early
 * return path partially visible at the first unlock).
 */
449 sfc_sw_xstats_get_names(struct sfc_adapter *sa,
450 struct rte_eth_xstat_name *xstats_names,
451 unsigned int xstats_count,
452 unsigned int *nb_written,
453 unsigned int *nb_supported)
455 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
459 sfc_adapter_lock(sa);
461 for (i = 0; i < sw_stats->supp_count; i++) {
462 ret = sfc_sw_stat_get_names(sa, sw_stats->supp[i].descr,
463 xstats_names, xstats_count,
464 nb_written, nb_supported);
/* Error path: release the lock before returning the failure */
466 sfc_adapter_unlock(sa);
471 sfc_adapter_unlock(sa);
/*
 * Entry point: gather SW xstat values for an explicit id list under
 * the adapter lock. Cache is invalidated first; reset baselines are
 * then subtracted for every requested id that belongs to the SW xstat
 * range.
 */
477 sfc_sw_xstats_get_vals_by_id(struct sfc_adapter *sa,
481 unsigned int *nb_supported)
483 uint64_t *reset_vals = sa->sw_stats.reset_vals;
484 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
485 unsigned int sw_xstats_offset;
488 sfc_adapter_lock(sa);
490 sfc_sw_stats_clear_cache(sa);
492 sw_xstats_offset = *nb_supported;
494 for (i = 0; i < sw_stats->supp_count; i++) {
495 sfc_sw_xstat_get_values_by_id(sa, sw_stats->supp[i].descr, i,
496 ids, values, n, nb_supported);
/* Report values relative to the last reset */
499 for (i = 0; i < n; i++) {
500 if (sw_xstats_offset <= ids[i] && ids[i] < *nb_supported)
501 values[i] -= reset_vals[ids[i] - sw_xstats_offset];
504 sfc_adapter_unlock(sa);
/*
 * Entry point: gather SW xstat names for an explicit id list under the
 * adapter lock. A failure from the per-stat helper unlocks and bails
 * out (early return path partially visible at the first unlock).
 */
508 sfc_sw_xstats_get_names_by_id(struct sfc_adapter *sa,
510 struct rte_eth_xstat_name *xstats_names,
512 unsigned int *nb_supported)
514 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
518 sfc_adapter_lock(sa);
520 for (i = 0; i < sw_stats->supp_count; i++) {
521 ret = sfc_sw_xstat_get_names_by_id(sa, sw_stats->supp[i].descr,
522 ids, xstats_names, size,
/* Error path: release the lock before returning the failure */
525 sfc_adapter_unlock(sa);
531 sfc_adapter_unlock(sa);
/*
 * Record the current values of one SW stat as its reset baseline.
 * reset_vals points at this stat's slice of the baseline array: total
 * entry first (zeroed then accumulated), then per-queue entries.
 * Caller must hold the adapter lock.
 * NOTE(review): per-queue values appear to be written starting at
 * reset_vals[0] even when a total occupies the first slot; the
 * (not visible) advance of reset_vals past the total presumably
 * happens between these lines -- TODO confirm.
 */
537 sfc_sw_xstat_reset(struct sfc_adapter *sa,
538 const struct sfc_sw_stat_descr *sw_stat,
539 unsigned int sw_stat_idx,
540 uint64_t *reset_vals)
542 unsigned int nb_queues;
544 uint64_t *total_xstat_reset = NULL;
546 SFC_ASSERT(sfc_adapter_is_locked(sa));
548 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
553 * The order of each software xstat type is the total xstat
554 * followed by per-queue xstats.
556 if (sw_stat->provide_total) {
557 total_xstat_reset = reset_vals;
558 *total_xstat_reset = 0;
562 for (qid = 0; qid < nb_queues; ++qid) {
563 reset_vals[qid] = sfc_sw_stat_get_val(sa, sw_stat_idx, qid);
564 if (sw_stat->provide_total)
565 *total_xstat_reset += reset_vals[qid];
/*
 * Reset all SW xstats: invalidate the cache, then snapshot every
 * stat's current values as the new baselines, advancing through the
 * reset_vals array by each stat's entry count.
 * Caller must hold the adapter lock.
 */
570 sfc_sw_xstats_reset(struct sfc_adapter *sa)
572 uint64_t *reset_vals = sa->sw_stats.reset_vals;
573 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
576 SFC_ASSERT(sfc_adapter_is_locked(sa));
578 sfc_sw_stats_clear_cache(sa);
580 for (i = 0; i < sw_stats->supp_count; i++) {
581 sfc_sw_xstat_reset(sa, sw_stats->supp[i].descr, i, reset_vals);
/* Move to the next stat's slice of the baseline array */
582 reset_vals += sfc_sw_xstat_get_nb_supported(sa,
583 sw_stats->supp[i].descr);
/*
 * (Re)size all SW-stats state for the current queue configuration:
 * the supported-stats array (allocated once), the reset-baseline
 * array (one slot per xstat entry) and the value cache (one uint64_t
 * per stat per queue), then carve the cache into per-stat slices.
 * Failure paths free everything (labels not visible in this sampled
 * listing).
 */
588 sfc_sw_xstats_configure(struct sfc_adapter *sa)
590 uint64_t **reset_vals = &sa->sw_stats.reset_vals;
591 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
592 unsigned int cache_count = 0;
593 uint64_t **cache = &sa->sw_stats.cache;
594 uint64_t *stat_cache;
595 size_t nb_supported = 0;
599 sw_stats->supp_count = RTE_DIM(sfc_sw_stats_descr);
/* The descriptor table is static, so supp is allocated only once */
600 if (sw_stats->supp == NULL) {
601 sw_stats->supp = rte_malloc(NULL, sw_stats->supp_count *
602 sizeof(*sw_stats->supp), 0);
603 if (sw_stats->supp == NULL)
606 for (i = 0; i < sw_stats->supp_count; i++)
607 sw_stats->supp[i].descr = &sfc_sw_stats_descr[i];
/* Sizes depend on the current Rx/Tx queue counts */
609 for (i = 0; i < sw_stats->supp_count; i++) {
610 nb_supported += sfc_sw_xstat_get_nb_supported(sa,
611 sw_stats->supp[i].descr);
612 cache_count += sfc_sw_stat_get_queue_count(sa,
613 sw_stats->supp[i].descr);
615 sa->sw_stats.xstats_count = nb_supported;
617 *reset_vals = rte_realloc(*reset_vals,
618 nb_supported * sizeof(**reset_vals), 0);
619 if (*reset_vals == NULL) {
621 goto fail_reset_vals;
624 memset(*reset_vals, 0, nb_supported * sizeof(**reset_vals));
/*
 * NOTE(review): element size here is sizeof(*cache), i.e. the size of
 * a uint64_t pointer, where sizeof(**cache) (uint64_t) looks intended.
 * Identical on 64-bit targets but would under-allocate on 32-bit --
 * verify (upstream DPDK later fixed exactly this expression).
 */
626 *cache = rte_realloc(*cache, cache_count * sizeof(*cache), 0);
627 if (*cache == NULL) {
631 sa->sw_stats.cache_count = cache_count;
/* Hand each stat its slice of the cache, one slot per queue */
634 for (i = 0; i < sw_stats->supp_count; i++) {
635 sw_stats->supp[i].cache = stat_cache;
636 stat_cache += sfc_sw_stat_get_queue_count(sa,
637 sw_stats->supp[i].descr);
/* Failure cleanup (labels between these lines are not visible) */
643 rte_free(*reset_vals);
646 sa->sw_stats.xstats_count = 0;
647 rte_free(sw_stats->supp);
648 sw_stats->supp = NULL;
649 sw_stats->supp_count = 0;
/*
 * Free the queues bitmap and its backing memory allocated by
 * sfc_sw_xstats_alloc_queues_bitmap().
 */
655 sfc_sw_xstats_free_queues_bitmap(struct sfc_adapter *sa)
657 rte_bitmap_free(sa->sw_stats.queues_bitmap);
658 rte_free(sa->sw_stats.queues_bitmap_mem);
/*
 * Allocate the bitmap (sized for the maximum possible queue count)
 * used by sfc_sw_xstat_get_values_by_id() to track visited queues,
 * and initialize its protecting spinlock. On failure the partially
 * allocated resources are released via the free helper.
 */
662 sfc_sw_xstats_alloc_queues_bitmap(struct sfc_adapter *sa)
664 struct rte_bitmap **queues_bitmap = &sa->sw_stats.queues_bitmap;
665 void **queues_bitmap_mem = &sa->sw_stats.queues_bitmap_mem;
669 bmp_size = rte_bitmap_get_memory_footprint(RTE_MAX_QUEUES_PER_PORT);
670 *queues_bitmap_mem = NULL;
671 *queues_bitmap = NULL;
673 *queues_bitmap_mem = rte_calloc_socket("bitmap_mem", bmp_size, 1, 0,
675 if (*queues_bitmap_mem == NULL)
678 *queues_bitmap = rte_bitmap_init(RTE_MAX_QUEUES_PER_PORT,
679 *queues_bitmap_mem, bmp_size);
680 if (*queues_bitmap == NULL) {
685 rte_spinlock_init(&sa->sw_stats.queues_bitmap_lock);
/* Failure path: release whatever was allocated above */
689 sfc_sw_xstats_free_queues_bitmap(sa);
/*
 * Initialize SW-stats state to empty; real sizing happens later in
 * sfc_sw_xstats_configure(). Only the queues bitmap is allocated here,
 * and its result is propagated as this function's return value.
 */
694 sfc_sw_xstats_init(struct sfc_adapter *sa)
696 sa->sw_stats.xstats_count = 0;
697 sa->sw_stats.supp = NULL;
698 sa->sw_stats.supp_count = 0;
699 sa->sw_stats.cache = NULL;
700 sa->sw_stats.cache_count = 0;
701 sa->sw_stats.reset_vals = NULL;
703 return sfc_sw_xstats_alloc_queues_bitmap(sa);
707 sfc_sw_xstats_close(struct sfc_adapter *sa)
709 sfc_sw_xstats_free_queues_bitmap(sa);
710 sa->sw_stats.reset_vals = NULL;
711 rte_free(sa->sw_stats.cache);
712 sa->sw_stats.cache = NULL;
713 sa->sw_stats.cache_count = 0;
714 rte_free(sa->sw_stats.reset_vals);
715 rte_free(sa->sw_stats.supp);
716 sa->sw_stats.supp = NULL;
717 sa->sw_stats.supp_count = 0;
718 sa->sw_stats.xstats_count = 0;