1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2021 Xilinx, Inc.
6 #include <rte_bitmap.h>
11 #include "sfc_sw_stats.h"
13 #define SFC_SW_STAT_INVALID UINT64_MAX
15 #define SFC_SW_STATS_GROUP_SIZE_MAX 2U
16 #define SFC_SW_STAT_GOOD_PACKETS "packets"
17 #define SFC_SW_STAT_GOOD_BYTES "bytes"
/*
 * Direction of a software statistic (Rx vs Tx queues).
 * NOTE(review): enumerator lines are missing from this extract.
 */
19 enum sfc_sw_stats_type {
/*
 * Index layout of the "basic" packets/bytes stats group: a getter fills
 * a values[] array indexed by these constants (see the good_pkts_bytes
 * callbacks below).
 */
24 enum sfc_sw_stats_group_basic {
25 SFC_SW_STATS_GROUP_BASIC_PKTS = 0,
26 SFC_SW_STATS_GROUP_BASIC_BYTES,
/* NOTE(review): upstream spells this "SFX_", not "SFC_" — kept as-is. */
27 SFX_SW_STATS_GROUP_BASIC_MAX
/*
 * Getter callback: fill values[0..values_count-1] with the current
 * counters of queue @qid for one stats group (all stats of the group
 * are fetched in a single call).
 */
30 typedef void sfc_get_sw_stat_val_t(struct sfc_adapter *sa, uint16_t qid,
31 uint64_t *values, unsigned int values_count);
/*
 * Descriptor of one software xstat.  A non-NULL get_val marks the start
 * of a stats group; subsequent descriptors with get_val == NULL belong
 * to the same group (see sfc_sw_stat_get_val()).
 * NOTE(review): the "name" and "provide_total" members are on lines
 * missing from this extract.
 */
33 struct sfc_sw_stat_descr {
35 enum sfc_sw_stats_type type;
36 sfc_get_sw_stat_val_t *get_val;
/*
 * Group getter for the basic Rx stats: store the queue's good packet
 * and byte counters into values[] (indexed by
 * SFC_SW_STATS_GROUP_BASIC_*), or zeros when the Rx queue is not
 * initialized.  NOTE(review): return type, braces and the "else" line
 * are missing from this extract.
 */
40 static sfc_get_sw_stat_val_t sfc_sw_stat_get_rx_good_pkts_bytes;
42 sfc_sw_stat_get_rx_good_pkts_bytes(struct sfc_adapter *sa, uint16_t qid,
44 unsigned int values_count)
46 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
47 struct sfc_rxq_info *rxq_info;
48 union sfc_pkts_bytes qstats;
/* values_count is only used by the debug-build assertion below. */
50 RTE_SET_USED(values_count);
51 SFC_ASSERT(values_count == SFX_SW_STATS_GROUP_BASIC_MAX);
52 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, qid);
53 if (rxq_info->state & SFC_RXQ_INITIALIZED) {
/* Coherent pkts/bytes snapshot from the datapath queue stats. */
54 sfc_pkts_bytes_get(&rxq_info->dp->dpq.stats, &qstats);
55 values[SFC_SW_STATS_GROUP_BASIC_PKTS] = qstats.pkts;
56 values[SFC_SW_STATS_GROUP_BASIC_BYTES] = qstats.bytes;
/* Queue not initialized: report zero counters. */
58 values[SFC_SW_STATS_GROUP_BASIC_PKTS] = 0;
59 values[SFC_SW_STATS_GROUP_BASIC_BYTES] = 0;
/*
 * Group getter for the basic Tx stats: Tx counterpart of
 * sfc_sw_stat_get_rx_good_pkts_bytes(), filling values[] with the Tx
 * queue's good packet/byte counters or zeros when uninitialized.
 * NOTE(review): return type, braces and the "else" line are missing
 * from this extract.
 */
63 static sfc_get_sw_stat_val_t sfc_sw_stat_get_tx_good_pkts_bytes;
65 sfc_sw_stat_get_tx_good_pkts_bytes(struct sfc_adapter *sa, uint16_t qid,
67 unsigned int values_count)
69 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
70 struct sfc_txq_info *txq_info;
71 union sfc_pkts_bytes qstats;
73 RTE_SET_USED(values_count);
74 SFC_ASSERT(values_count == SFX_SW_STATS_GROUP_BASIC_MAX);
75 txq_info = sfc_txq_info_by_ethdev_qid(sas, qid);
76 if (txq_info->state & SFC_TXQ_INITIALIZED) {
77 sfc_pkts_bytes_get(&txq_info->dp->dpq.stats, &qstats);
78 values[SFC_SW_STATS_GROUP_BASIC_PKTS] = qstats.pkts;
79 values[SFC_SW_STATS_GROUP_BASIC_BYTES] = qstats.bytes;
/* Queue not initialized: report zero counters. */
81 values[SFC_SW_STATS_GROUP_BASIC_PKTS] = 0;
82 values[SFC_SW_STATS_GROUP_BASIC_BYTES] = 0;
/*
 * Single-stat getter: number of Rx doorbells rung on the queue, or 0
 * when the Rx queue is not initialized.  values_count is always 1 for
 * this (ungrouped) stat.
 */
86 static sfc_get_sw_stat_val_t sfc_get_sw_stat_val_rx_dbells;
88 sfc_get_sw_stat_val_rx_dbells(struct sfc_adapter *sa, uint16_t qid,
89 uint64_t *values, unsigned int values_count)
91 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
92 struct sfc_rxq_info *rxq_info;
94 RTE_SET_USED(values_count);
95 SFC_ASSERT(values_count == 1);
96 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, qid);
97 values[0] = rxq_info->state & SFC_RXQ_INITIALIZED ?
98 rxq_info->dp->dpq.rx_dbells : 0;
/*
 * Single-stat getter: number of Tx doorbells rung on the queue, or 0
 * when the Tx queue is not initialized.  Tx counterpart of
 * sfc_get_sw_stat_val_rx_dbells().
 */
101 static sfc_get_sw_stat_val_t sfc_get_sw_stat_val_tx_dbells;
103 sfc_get_sw_stat_val_tx_dbells(struct sfc_adapter *sa, uint16_t qid,
104 uint64_t *values, unsigned int values_count)
106 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
107 struct sfc_txq_info *txq_info;
109 RTE_SET_USED(values_count);
110 SFC_ASSERT(values_count == 1);
111 txq_info = sfc_txq_info_by_ethdev_qid(sas, qid);
112 values[0] = txq_info->state & SFC_TXQ_INITIALIZED ?
113 txq_info->dp->dpq.tx_dbells : 0;
117 * SW stats can be grouped together. When stats are grouped the corresponding
118 * stats values for each queue are obtained during calling one get value
119 * callback. Stats of the same group are contiguous in the structure below.
120 * The start of the group is denoted by stat implementing get value callback.
/*
 * Master table of all software xstats.  Entries with no .get_val
 * (e.g. the "bytes" entries below) are members of the group started by
 * the nearest preceding entry that has one.  NOTE(review): the "dbells"
 * entries' .name initializers are on lines missing from this extract.
 */
122 const struct sfc_sw_stat_descr sfc_sw_stats_descr[] = {
123 /* Group of Rx packets/bytes stats */
125 .name = SFC_SW_STAT_GOOD_PACKETS,
126 .type = SFC_SW_STATS_RX,
127 .get_val = sfc_sw_stat_get_rx_good_pkts_bytes,
128 .provide_total = false,
/* Second member of the Rx group: no .get_val of its own. */
131 .name = SFC_SW_STAT_GOOD_BYTES,
132 .type = SFC_SW_STATS_RX,
134 .provide_total = false,
136 /* Group of Tx packets/bytes stats */
138 .name = SFC_SW_STAT_GOOD_PACKETS,
139 .type = SFC_SW_STATS_TX,
140 .get_val = sfc_sw_stat_get_tx_good_pkts_bytes,
141 .provide_total = false,
/* Second member of the Tx group: no .get_val of its own. */
144 .name = SFC_SW_STAT_GOOD_BYTES,
145 .type = SFC_SW_STATS_TX,
147 .provide_total = false,
149 /* End of basic stats */
/* Ungrouped doorbell stats; these also report a per-port total. */
152 .type = SFC_SW_STATS_RX,
153 .get_val = sfc_get_sw_stat_val_rx_dbells,
154 .provide_total = true,
158 .type = SFC_SW_STATS_TX,
159 .get_val = sfc_get_sw_stat_val_tx_dbells,
160 .provide_total = true,
/*
 * Compose the xstat name for one entry of a software stat:
 * "<prefix>_<name>" for the total (id_off == 0 when a total is
 * provided) or "<prefix>_q<qid>_<name>" for a per-queue entry.
 * The Rx/Tx prefix is chosen from sw_stat->type (prefix assignment
 * lines are missing from this extract).  Returns 0 on success or a
 * negative errno on formatting failure/truncation.
 */
165 sfc_sw_stat_get_name(struct sfc_adapter *sa,
166 const struct sfc_sw_stat_descr *sw_stat, char *name,
167 size_t name_size, unsigned int id_off)
172 switch (sw_stat->type) {
173 case SFC_SW_STATS_RX:
176 case SFC_SW_STATS_TX:
/* Unknown type: log and bail out (default case, error path). */
180 sfc_err(sa, "%s: unknown software statistics type %d",
181 __func__, sw_stat->type);
/* Total entry comes first when the stat provides one. */
185 if (sw_stat->provide_total && id_off == 0) {
186 ret = snprintf(name, name_size, "%s_%s", prefix,
/* ret >= name_size means the name was truncated. */
188 if (ret < 0 || ret >= (int)name_size) {
189 sfc_err(sa, "%s: failed to fill xstat name %s_%s, err %d",
190 __func__, prefix, sw_stat->name, ret);
191 return ret > 0 ? -EINVAL : ret;
/* Per-queue entry: offset past the total (if any) gives qid. */
194 uint16_t qid = id_off - sw_stat->provide_total;
195 ret = snprintf(name, name_size, "%s_q%u_%s", prefix, qid,
197 if (ret < 0 || ret >= (int)name_size) {
198 sfc_err(sa, "%s: failed to fill xstat name %s_q%u_%s, err %d",
199 __func__, prefix, qid, sw_stat->name, ret);
200 return ret > 0 ? -EINVAL : ret;
/*
 * Number of ethdev queues a software stat applies to: Rx or Tx queue
 * count depending on the stat's type.  The default (unknown type) path
 * logs an error; its return line is missing from this extract.
 */
208 sfc_sw_stat_get_queue_count(struct sfc_adapter *sa,
209 const struct sfc_sw_stat_descr *sw_stat)
211 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
213 switch (sw_stat->type) {
214 case SFC_SW_STATS_RX:
215 return sas->ethdev_rxq_count;
216 case SFC_SW_STATS_TX:
217 return sas->ethdev_txq_count;
219 sfc_err(sa, "%s: unknown software statistics type %d",
220 __func__, sw_stat->type);
/*
 * Number of xstat entries one software stat contributes: one per queue
 * plus one for the total when provide_total is set (bool arithmetic
 * adds 0 or 1); zero when there are no queues.
 */
226 sfc_sw_xstat_per_queue_get_count(const struct sfc_sw_stat_descr *sw_stat,
227 unsigned int nb_queues)
229 /* Take into account the total xstat of all queues */
230 return nb_queues > 0 ? sw_stat->provide_total + nb_queues : 0;
/*
 * Convenience wrapper: total number of xstat entries a stat contributes
 * for the adapter's current queue configuration.
 */
234 sfc_sw_xstat_get_nb_supported(struct sfc_adapter *sa,
235 const struct sfc_sw_stat_descr *sw_stat)
237 unsigned int nb_queues;
239 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
240 return sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
/*
 * Append names for all entries of one software stat to xstats_names,
 * bounded by xstats_names_sz.  *nb_supported is advanced by the full
 * entry count regardless of buffer space; *nb_written only when a name
 * is actually stored.  Error-propagation and increment lines are
 * missing from this extract.
 */
244 sfc_sw_stat_get_names(struct sfc_adapter *sa,
245 const struct sfc_sw_stat_descr *sw_stat,
246 struct rte_eth_xstat_name *xstats_names,
247 unsigned int xstats_names_sz,
248 unsigned int *nb_written,
249 unsigned int *nb_supported)
251 const size_t name_size = sizeof(xstats_names[0].name);
252 unsigned int id_base = *nb_supported;
253 unsigned int nb_queues;
257 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
260 *nb_supported += sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
263 * The order of each software xstat type is the total xstat
264 * followed by per-queue xstats.
/* Total entry first (id offset 0 within this stat). */
266 if (*nb_written < xstats_names_sz && sw_stat->provide_total) {
267 rc = sfc_sw_stat_get_name(sa, sw_stat,
268 xstats_names[*nb_written].name,
269 name_size, *nb_written - id_base)?
275 for (qid = 0; qid < nb_queues; ++qid) {
276 if (*nb_written < xstats_names_sz) {
277 rc = sfc_sw_stat_get_name(sa, sw_stat,
278 xstats_names[*nb_written].name,
279 name_size, *nb_written - id_base);
/*
 * Fill names only for the requested xstat ids that fall inside this
 * stat's id range [id_base, id_end).  ids[i] - id_base gives the
 * in-stat offset passed to sfc_sw_stat_get_name().  The ids/size
 * parameter lines and error handling are missing from this extract.
 */
290 sfc_sw_xstat_get_names_by_id(struct sfc_adapter *sa,
291 const struct sfc_sw_stat_descr *sw_stat,
293 struct rte_eth_xstat_name *xstats_names,
295 unsigned int *nb_supported)
297 const size_t name_size = sizeof(xstats_names[0].name);
298 unsigned int id_base = *nb_supported;
300 unsigned int nb_queues;
304 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
307 *nb_supported += sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
310 * The order of each software xstat type is the total xstat
311 * followed by per-queue xstats.
313 id_end = id_base + sw_stat->provide_total + nb_queues;
314 for (i = 0; i < size; i++) {
315 if (id_base <= ids[i] && ids[i] < id_end) {
316 rc = sfc_sw_stat_get_name(sa, sw_stat,
317 xstats_names[i].name,
318 name_size, ids[i] - id_base);
/*
 * Return the (cached) value of stat sw_stat_idx for queue qid.  On a
 * cache miss the whole stats group is fetched with one get_val() call
 * and every member's cache slot for this queue is refreshed, so
 * sibling stats in the group see consistent values.
 * SFC_SW_STAT_INVALID (UINT64_MAX) marks an empty cache slot.
 */
328 sfc_sw_stat_get_val(struct sfc_adapter *sa,
329 unsigned int sw_stat_idx, uint16_t qid)
331 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
332 uint64_t *res = &sw_stats->supp[sw_stat_idx].cache[qid];
333 uint64_t values[SFC_SW_STATS_GROUP_SIZE_MAX];
334 unsigned int group_start_idx;
335 unsigned int group_size;
/* Cache hit: return the stored value (return line missing here). */
338 if (*res != SFC_SW_STAT_INVALID)
342 * Search for the group start, i.e. the stat that implements
343 * get value callback.
345 group_start_idx = sw_stat_idx;
346 while (sw_stats->supp[group_start_idx].descr->get_val == NULL)
350 * Calculate number of elements in the group with loop till the next
351 * group start or the list end.
354 for (i = sw_stat_idx + 1; i < sw_stats->supp_count; i++) {
355 if (sw_stats->supp[i].descr->get_val != NULL)
/* Add members between the group start and sw_stat_idx itself. */
359 group_size += sw_stat_idx - group_start_idx;
361 SFC_ASSERT(group_size <= SFC_SW_STATS_GROUP_SIZE_MAX);
/* One callback invocation fills values[] for the whole group. */
362 sw_stats->supp[group_start_idx].descr->get_val(sa, qid, values,
/* Populate the per-queue cache of every stat in the group. */
364 for (i = group_start_idx; i < (group_start_idx + group_size); i++)
365 sw_stats->supp[i].cache[qid] = values[i - group_start_idx];
/*
 * Append values for all entries of one software stat to xstats[],
 * bounded by xstats_size: the total entry (if provided) first, then
 * per-queue values, accumulating the total on the fly.  *nb_supported
 * always advances by the full entry count; *nb_written only for stored
 * entries (increment lines are missing from this extract).
 */
371 sfc_sw_xstat_get_values(struct sfc_adapter *sa,
372 const struct sfc_sw_stat_descr *sw_stat,
373 unsigned int sw_stat_idx,
374 struct rte_eth_xstat *xstats,
375 unsigned int xstats_size,
376 unsigned int *nb_written,
377 unsigned int *nb_supported)
381 struct rte_eth_xstat *total_xstat;
382 bool count_total_value = false;
383 unsigned int nb_queues;
385 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
388 *nb_supported += sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
391 * The order of each software xstat type is the total xstat
392 * followed by per-queue xstats.
/* Reserve the total slot; it is accumulated in the loop below. */
394 if (*nb_written < xstats_size && sw_stat->provide_total) {
395 count_total_value = true;
396 total_xstat = &xstats[*nb_written];
397 xstats[*nb_written].id = *nb_written;
398 xstats[*nb_written].value = 0;
402 for (qid = 0; qid < nb_queues; ++qid) {
403 value = sfc_sw_stat_get_val(sa, sw_stat_idx, qid);
405 if (*nb_written < xstats_size) {
406 xstats[*nb_written].id = *nb_written;
407 xstats[*nb_written].value = value;
/* total_xstat is only set when count_total_value is true. */
411 if (count_total_value)
412 total_xstat->value += value;
/*
 * Fill values only for requested ids within this stat's id range.
 * Queues already read while serving explicit per-queue ids are tracked
 * in the shared queues bitmap (under its spinlock) so the total, when
 * requested, sums each queue exactly once: cached per-loop values for
 * queues seen, fresh reads for the rest.  The ids/values parameter
 * lines and several statements are missing from this extract.
 */
417 sfc_sw_xstat_get_values_by_id(struct sfc_adapter *sa,
418 const struct sfc_sw_stat_descr *sw_stat,
419 unsigned int sw_stat_idx,
422 unsigned int ids_size,
423 unsigned int *nb_supported)
425 rte_spinlock_t *bmp_lock = &sa->sw_stats.queues_bitmap_lock;
426 struct rte_bitmap *bmp = sa->sw_stats.queues_bitmap;
427 unsigned int id_base = *nb_supported;
428 unsigned int id_base_q;
430 bool count_total_value = false;
431 unsigned int total_value_idx;
432 uint64_t total_value = 0;
434 unsigned int nb_queues;
/* Bitmap is shared state: serialize and start from a clean slate. */
437 rte_spinlock_lock(bmp_lock);
438 rte_bitmap_reset(bmp);
440 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
443 *nb_supported += sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
446 * The order of each software xstat type is the total xstat
447 * followed by per-queue xstats.
449 id_end = id_base + sw_stat->provide_total + nb_queues;
450 for (i = 0; i < ids_size; i++) {
451 if (id_base <= ids[i] && ids[i] < id_end) {
/* Total requested: remember its slot, compute it afterwards. */
452 if (sw_stat->provide_total && ids[i] == id_base) {
453 /* Accumulative value */
454 count_total_value = true;
458 id_base_q = id_base + sw_stat->provide_total;
459 qid = ids[i] - id_base_q;
460 values[i] = sfc_sw_stat_get_val(sa, sw_stat_idx, qid);
461 total_value += values[i];
/* Mark this queue as already counted toward the total. */
463 rte_bitmap_set(bmp, qid);
467 if (count_total_value) {
468 values[total_value_idx] = 0;
469 for (qid = 0; qid < nb_queues; ++qid) {
/* Skip queues whose values were already summed above. */
470 if (rte_bitmap_get(bmp, qid) != 0)
472 values[total_value_idx] += sfc_sw_stat_get_val(sa,
476 values[total_value_idx] += total_value;
480 rte_spinlock_unlock(bmp_lock);
/*
 * Total number of software xstats computed by the last configure;
 * caller must hold the adapter lock.
 */
484 sfc_sw_xstats_get_nb_supported(struct sfc_adapter *sa)
486 SFC_ASSERT(sfc_adapter_is_locked(sa));
487 return sa->sw_stats.xstats_count;
/*
 * Invalidate the whole per-queue value cache by filling it with 0xff
 * bytes; the build-time assertion guarantees all-ones equals
 * SFC_SW_STAT_INVALID, so the memset is a valid bulk invalidation.
 */
491 sfc_sw_stats_clear_cache(struct sfc_adapter *sa)
493 unsigned int cache_count = sa->sw_stats.cache_count;
494 uint64_t *cache = sa->sw_stats.cache;
496 RTE_BUILD_BUG_ON(UINT64_C(0xffffffffffffffff) != SFC_SW_STAT_INVALID);
497 memset(cache, 0xff, cache_count * sizeof(*cache));
/*
 * Entry point: gather all software xstat values into xstats[] under
 * the adapter lock, then subtract the saved reset baselines so values
 * are relative to the last xstats reset.  The cache is invalidated
 * first so every value is re-read from the datapath.
 */
501 sfc_sw_xstats_get_vals(struct sfc_adapter *sa,
502 struct rte_eth_xstat *xstats,
503 unsigned int xstats_count,
504 unsigned int *nb_written,
505 unsigned int *nb_supported)
507 uint64_t *reset_vals = sa->sw_stats.reset_vals;
508 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
509 unsigned int sw_xstats_offset;
512 sfc_adapter_lock(sa);
514 sfc_sw_stats_clear_cache(sa);
/* Software xstats follow whatever was already counted by the caller. */
516 sw_xstats_offset = *nb_supported;
518 for (i = 0; i < sw_stats->supp_count; i++) {
519 sfc_sw_xstat_get_values(sa, sw_stats->supp[i].descr, i,
520 xstats, xstats_count, nb_written, nb_supported);
/* Report values relative to the last reset baseline. */
523 for (i = sw_xstats_offset; i < *nb_written; i++)
524 xstats[i].value -= reset_vals[i - sw_xstats_offset];
526 sfc_adapter_unlock(sa);
/*
 * Entry point: fill names of all software xstats under the adapter
 * lock.  On a per-stat failure the lock is released and the error
 * returned (the return statements are missing from this extract).
 */
530 sfc_sw_xstats_get_names(struct sfc_adapter *sa,
531 struct rte_eth_xstat_name *xstats_names,
532 unsigned int xstats_count,
533 unsigned int *nb_written,
534 unsigned int *nb_supported)
536 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
540 sfc_adapter_lock(sa);
542 for (i = 0; i < sw_stats->supp_count; i++) {
543 ret = sfc_sw_stat_get_names(sa, sw_stats->supp[i].descr,
544 xstats_names, xstats_count,
545 nb_written, nb_supported);
/* Early unlock on error from the per-stat helper. */
547 sfc_adapter_unlock(sa);
552 sfc_adapter_unlock(sa);
/*
 * Entry point: gather values for the requested xstat ids under the
 * adapter lock and subtract reset baselines for ids that belong to
 * the software xstats range.  The ids/values/n parameter lines are
 * missing from this extract.
 */
558 sfc_sw_xstats_get_vals_by_id(struct sfc_adapter *sa,
562 unsigned int *nb_supported)
564 uint64_t *reset_vals = sa->sw_stats.reset_vals;
565 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
566 unsigned int sw_xstats_offset;
569 sfc_adapter_lock(sa);
571 sfc_sw_stats_clear_cache(sa);
573 sw_xstats_offset = *nb_supported;
575 for (i = 0; i < sw_stats->supp_count; i++) {
576 sfc_sw_xstat_get_values_by_id(sa, sw_stats->supp[i].descr, i,
577 ids, values, n, nb_supported);
/* Apply reset baselines only to ids within the software range. */
580 for (i = 0; i < n; i++) {
581 if (sw_xstats_offset <= ids[i] && ids[i] < *nb_supported)
582 values[i] -= reset_vals[ids[i] - sw_xstats_offset];
585 sfc_adapter_unlock(sa);
/*
 * Entry point: fill names for the requested xstat ids under the
 * adapter lock, delegating per stat.  On a per-stat failure the lock
 * is released and the error returned (parameter and return lines are
 * missing from this extract).
 */
589 sfc_sw_xstats_get_names_by_id(struct sfc_adapter *sa,
591 struct rte_eth_xstat_name *xstats_names,
593 unsigned int *nb_supported)
595 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
599 sfc_adapter_lock(sa);
601 for (i = 0; i < sw_stats->supp_count; i++) {
602 ret = sfc_sw_xstat_get_names_by_id(sa, sw_stats->supp[i].descr,
603 ids, xstats_names, size,
/* Early unlock on error from the per-stat helper. */
606 sfc_adapter_unlock(sa);
612 sfc_adapter_unlock(sa);
/*
 * Snapshot the current values of one software stat into reset_vals[]
 * so future reads are reported relative to this baseline.  When the
 * stat provides a total it occupies reset_vals[0] and accumulates the
 * per-queue baselines (the reset_vals advance past the total slot is
 * on a line missing from this extract).
 */
618 sfc_sw_xstat_reset(struct sfc_adapter *sa,
619 const struct sfc_sw_stat_descr *sw_stat,
620 unsigned int sw_stat_idx,
621 uint64_t *reset_vals)
623 unsigned int nb_queues;
625 uint64_t *total_xstat_reset = NULL;
627 SFC_ASSERT(sfc_adapter_is_locked(sa));
629 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
634 * The order of each software xstat type is the total xstat
635 * followed by per-queue xstats.
637 if (sw_stat->provide_total) {
638 total_xstat_reset = reset_vals;
639 *total_xstat_reset = 0;
643 for (qid = 0; qid < nb_queues; ++qid) {
644 reset_vals[qid] = sfc_sw_stat_get_val(sa, sw_stat_idx, qid);
645 if (sw_stat->provide_total)
646 *total_xstat_reset += reset_vals[qid];
/*
 * Reset all software xstats: invalidate the cache and capture fresh
 * baselines for every supported stat, advancing the reset_vals cursor
 * by each stat's entry count.  Caller must hold the adapter lock.
 */
651 sfc_sw_xstats_reset(struct sfc_adapter *sa)
653 uint64_t *reset_vals = sa->sw_stats.reset_vals;
654 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
657 SFC_ASSERT(sfc_adapter_is_locked(sa));
659 sfc_sw_stats_clear_cache(sa);
661 for (i = 0; i < sw_stats->supp_count; i++) {
662 sfc_sw_xstat_reset(sa, sw_stats->supp[i].descr, i, reset_vals);
663 reset_vals += sfc_sw_xstat_get_nb_supported(sa,
664 sw_stats->supp[i].descr);
/*
 * True when the xstat name is one of the basic "packets"/"bytes"
 * stats, which are only available if the datapath implements stats.
 */
669 sfc_sw_stats_is_packets_or_bytes(const char *xstat_name)
671 return strcmp(xstat_name, SFC_SW_STAT_GOOD_PACKETS) == 0 ||
672 strcmp(xstat_name, SFC_SW_STAT_GOOD_BYTES) == 0;
/*
 * Rebuild the supported-stats list from the master descriptor table,
 * skipping basic packets/bytes stats in a direction whose datapath
 * lacks the STATS feature (the "continue" lines are missing from this
 * extract).
 */
676 sfc_sw_stats_fill_available_descr(struct sfc_adapter *sa)
678 const struct sfc_adapter_priv *sap = &sa->priv;
679 bool have_dp_rx_stats = sap->dp_rx->features & SFC_DP_RX_FEAT_STATS;
680 bool have_dp_tx_stats = sap->dp_tx->features & SFC_DP_TX_FEAT_STATS;
681 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
682 const struct sfc_sw_stat_descr *sw_stat_descr;
685 sw_stats->supp_count = 0;
686 for (i = 0; i < RTE_DIM(sfc_sw_stats_descr); i++) {
687 sw_stat_descr = &sfc_sw_stats_descr[i];
/* Skip Rx packets/bytes when the Rx datapath has no stats. */
688 if (!have_dp_rx_stats &&
689 sw_stat_descr->type == SFC_SW_STATS_RX &&
690 sfc_sw_stats_is_packets_or_bytes(sw_stat_descr->name))
/* Skip Tx packets/bytes when the Tx datapath has no stats. */
692 if (!have_dp_tx_stats &&
693 sw_stat_descr->type == SFC_SW_STATS_TX &&
694 sfc_sw_stats_is_packets_or_bytes(sw_stat_descr->name))
696 sw_stats->supp[sw_stats->supp_count].descr = sw_stat_descr;
697 sw_stats->supp_count++;
/*
 * Record shortcut pointers into the reset_vals array for the basic
 * Rx/Tx packets/bytes baselines, so basic-stats code can adjust them
 * directly.  Walks the supported list advancing through reset_vals by
 * each stat's entry count.  Returns 0 on success; an unknown stat type
 * is an error (the return lines are missing from this extract).
 */
702 sfc_sw_stats_set_reset_basic_stats(struct sfc_adapter *sa)
704 uint64_t *reset_vals = sa->sw_stats.reset_vals;
705 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
706 const struct sfc_sw_stat_descr *sw_stat;
709 for (i = 0; i < sw_stats->supp_count; i++) {
710 sw_stat = sw_stats->supp[i].descr;
712 switch (sw_stat->type) {
713 case SFC_SW_STATS_RX:
714 if (strcmp(sw_stat->name,
715 SFC_SW_STAT_GOOD_PACKETS) == 0)
716 sa->sw_stats.reset_rx_pkts = reset_vals;
717 else if (strcmp(sw_stat->name,
718 SFC_SW_STAT_GOOD_BYTES) == 0)
719 sa->sw_stats.reset_rx_bytes = reset_vals;
721 case SFC_SW_STATS_TX:
722 if (strcmp(sw_stat->name,
723 SFC_SW_STAT_GOOD_PACKETS) == 0)
724 sa->sw_stats.reset_tx_pkts = reset_vals;
725 else if (strcmp(sw_stat->name,
726 SFC_SW_STAT_GOOD_BYTES) == 0)
727 sa->sw_stats.reset_tx_bytes = reset_vals;
/* default: unknown stat type is a programming error. */
730 SFC_GENERIC_LOG(ERR, "Unknown SW stat type");
/* Advance to the next stat's slice of reset_vals. */
734 reset_vals += sfc_sw_xstat_get_nb_supported(sa, sw_stat);
/*
 * (Re)configure software xstats for the current queue configuration:
 * build the supported-stats list, size and (re)allocate the reset
 * baselines and the per-queue value cache, wire each stat's cache
 * slice, and record the basic-stats baseline pointers.  On failure the
 * goto ladder unwinds allocations (several unwind lines are missing
 * from this extract).  Returns 0 on success, negative errno otherwise.
 */
741 sfc_sw_xstats_configure(struct sfc_adapter *sa)
743 uint64_t **reset_vals = &sa->sw_stats.reset_vals;
744 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
745 unsigned int cache_count = 0;
746 uint64_t **cache = &sa->sw_stats.cache;
747 uint64_t *stat_cache;
748 size_t nb_supported = 0;
/* Lazily allocate the supported-stats array on first configure. */
752 sw_stats->supp_count = RTE_DIM(sfc_sw_stats_descr);
753 if (sw_stats->supp == NULL) {
754 sw_stats->supp = rte_malloc(NULL, sw_stats->supp_count *
755 sizeof(*sw_stats->supp), 0);
756 if (sw_stats->supp == NULL)
759 for (i = 0; i < sw_stats->supp_count; i++)
760 sw_stats->supp[i].descr = &sfc_sw_stats_descr[i];
761 sfc_sw_stats_fill_available_descr(sa);
/* Size totals: xstat entries and per-queue cache slots. */
763 for (i = 0; i < sw_stats->supp_count; i++) {
764 nb_supported += sfc_sw_xstat_get_nb_supported(sa,
765 sw_stats->supp[i].descr);
766 cache_count += sfc_sw_stat_get_queue_count(sa,
767 sw_stats->supp[i].descr);
769 sa->sw_stats.xstats_count = nb_supported;
771 *reset_vals = rte_realloc(*reset_vals,
772 nb_supported * sizeof(**reset_vals), 0);
773 if (*reset_vals == NULL) {
775 goto fail_reset_vals;
778 memset(*reset_vals, 0, nb_supported * sizeof(**reset_vals));
/* NOTE(review): size uses sizeof(*cache) (pointer size) rather than
 * sizeof(**cache) (uint64_t); both are 8 bytes on 64-bit — confirm
 * against upstream before relying on 32-bit builds. */
780 *cache = rte_realloc(*cache, cache_count * sizeof(*cache), 0);
781 if (*cache == NULL) {
785 sa->sw_stats.cache_count = cache_count;
787 rc = sfc_sw_stats_set_reset_basic_stats(sa);
789 goto fail_reset_basic_stats;
/* Hand each supported stat its slice of the cache array. */
791 for (i = 0; i < sw_stats->supp_count; i++) {
792 sw_stats->supp[i].cache = stat_cache;
793 stat_cache += sfc_sw_stat_get_queue_count(sa,
794 sw_stats->supp[i].descr);
/* Error unwind: free in reverse order of acquisition. */
799 fail_reset_basic_stats:
802 sa->sw_stats.cache_count = 0;
804 rte_free(*reset_vals);
807 sa->sw_stats.xstats_count = 0;
808 rte_free(sw_stats->supp);
809 sw_stats->supp = NULL;
810 sw_stats->supp_count = 0;
/* Free the queues bitmap and its backing memory (NULL-safe frees). */
816 sfc_sw_xstats_free_queues_bitmap(struct sfc_adapter *sa)
818 rte_bitmap_free(sa->sw_stats.queues_bitmap);
819 rte_free(sa->sw_stats.queues_bitmap_mem);
/*
 * Allocate the per-queue bitmap (sized for RTE_MAX_QUEUES_PER_PORT)
 * used by the by-id value path to dedupe queues in the total, plus its
 * protecting spinlock.  On failure frees whatever was allocated and
 * returns an error (return lines are missing from this extract).
 */
823 sfc_sw_xstats_alloc_queues_bitmap(struct sfc_adapter *sa)
825 struct rte_bitmap **queues_bitmap = &sa->sw_stats.queues_bitmap;
826 void **queues_bitmap_mem = &sa->sw_stats.queues_bitmap_mem;
830 bmp_size = rte_bitmap_get_memory_footprint(RTE_MAX_QUEUES_PER_PORT);
831 *queues_bitmap_mem = NULL;
832 *queues_bitmap = NULL;
834 *queues_bitmap_mem = rte_calloc_socket("bitmap_mem", bmp_size, 1, 0,
836 if (*queues_bitmap_mem == NULL)
839 *queues_bitmap = rte_bitmap_init(RTE_MAX_QUEUES_PER_PORT,
840 *queues_bitmap_mem, bmp_size);
841 if (*queues_bitmap == NULL) {
846 rte_spinlock_init(&sa->sw_stats.queues_bitmap_lock);
/* Failure path: release the bitmap memory allocated above. */
850 sfc_sw_xstats_free_queues_bitmap(sa);
/*
 * One-time init: zero all software-stats state and allocate the
 * queues bitmap.  Returns the bitmap allocation result.
 */
855 sfc_sw_xstats_init(struct sfc_adapter *sa)
857 sa->sw_stats.xstats_count = 0;
858 sa->sw_stats.supp = NULL;
859 sa->sw_stats.supp_count = 0;
860 sa->sw_stats.cache = NULL;
861 sa->sw_stats.cache_count = 0;
862 sa->sw_stats.reset_vals = NULL;
864 return sfc_sw_xstats_alloc_queues_bitmap(sa);
/*
 * Teardown: free the bitmap and all configure-time allocations, and
 * NULL/zero the pointers and counts so a later init/configure starts
 * clean.  NOTE(review): reset_vals is NULLed before it is freed here —
 * the rte_free on line 875 then frees the stale pointer captured
 * earlier only if such capture exists outside this extract; confirm
 * ordering against upstream.
 */
868 sfc_sw_xstats_close(struct sfc_adapter *sa)
870 sfc_sw_xstats_free_queues_bitmap(sa);
871 sa->sw_stats.reset_vals = NULL;
872 rte_free(sa->sw_stats.cache);
873 sa->sw_stats.cache = NULL;
874 sa->sw_stats.cache_count = 0;
875 rte_free(sa->sw_stats.reset_vals);
876 rte_free(sa->sw_stats.supp);
877 sa->sw_stats.supp = NULL;
878 sa->sw_stats.supp_count = 0;
879 sa->sw_stats.xstats_count = 0;