1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2021 Xilinx, Inc.
6 #include <rte_bitmap.h>
11 #include "sfc_sw_stats.h"
13 #define SFC_SW_STAT_INVALID UINT64_MAX
15 #define SFC_SW_STATS_GROUP_SIZE_MAX 2U
16 #define SFC_SW_STAT_GOOD_PACKETS "packets"
17 #define SFC_SW_STAT_GOOD_BYTES "bytes"
/* Direction a software statistic is accounted for. */
enum sfc_sw_stats_type {
	SFC_SW_STATS_RX,
	SFC_SW_STATS_TX,
};

/*
 * Positions of the basic (packets/bytes) Rx stats inside the values
 * array filled by one grouped get-value callback invocation.
 * NOTE(review): "SFX_" in the MAX entry looks like a typo for "SFC_",
 * but it is kept since other code references this exact spelling.
 */
enum sfc_sw_stats_group_basic {
	SFC_SW_STATS_GROUP_BASIC_PKTS = 0,
	SFC_SW_STATS_GROUP_BASIC_BYTES,
	SFX_SW_STATS_GROUP_BASIC_MAX
};
30 typedef void sfc_get_sw_stat_val_t(struct sfc_adapter *sa, uint16_t qid,
31 uint64_t *values, unsigned int values_count);
33 struct sfc_sw_stat_descr {
35 enum sfc_sw_stats_type type;
36 sfc_get_sw_stat_val_t *get_val;
40 static sfc_get_sw_stat_val_t sfc_sw_stat_get_rx_good_pkts_bytes;
42 sfc_sw_stat_get_rx_good_pkts_bytes(struct sfc_adapter *sa, uint16_t qid,
44 unsigned int values_count)
46 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
47 struct sfc_rxq_info *rxq_info;
48 union sfc_pkts_bytes qstats;
50 RTE_SET_USED(values_count);
51 SFC_ASSERT(values_count == SFX_SW_STATS_GROUP_BASIC_MAX);
52 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, qid);
53 if (rxq_info->state & SFC_RXQ_INITIALIZED) {
54 sfc_pkts_bytes_get(&rxq_info->dp->dpq.stats, &qstats);
55 values[SFC_SW_STATS_GROUP_BASIC_PKTS] = qstats.pkts;
56 values[SFC_SW_STATS_GROUP_BASIC_BYTES] = qstats.bytes;
58 values[SFC_SW_STATS_GROUP_BASIC_PKTS] = 0;
59 values[SFC_SW_STATS_GROUP_BASIC_BYTES] = 0;
63 static sfc_get_sw_stat_val_t sfc_get_sw_stat_val_rx_dbells;
65 sfc_get_sw_stat_val_rx_dbells(struct sfc_adapter *sa, uint16_t qid,
66 uint64_t *values, unsigned int values_count)
68 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
69 struct sfc_rxq_info *rxq_info;
71 RTE_SET_USED(values_count);
72 SFC_ASSERT(values_count == 1);
73 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, qid);
74 values[0] = rxq_info->state & SFC_RXQ_INITIALIZED ?
75 rxq_info->dp->dpq.rx_dbells : 0;
78 static sfc_get_sw_stat_val_t sfc_get_sw_stat_val_tx_dbells;
80 sfc_get_sw_stat_val_tx_dbells(struct sfc_adapter *sa, uint16_t qid,
81 uint64_t *values, unsigned int values_count)
83 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
84 struct sfc_txq_info *txq_info;
86 RTE_SET_USED(values_count);
87 SFC_ASSERT(values_count == 1);
88 txq_info = sfc_txq_info_by_ethdev_qid(sas, qid);
89 values[0] = txq_info->state & SFC_TXQ_INITIALIZED ?
90 txq_info->dp->dpq.tx_dbells : 0;
94 * SW stats can be grouped together. When stats are grouped the corresponding
95 * stats values for each queue are obtained during calling one get value
96 * callback. Stats of the same group are contiguous in the structure below.
97 * The start of the group is denoted by stat implementing get value callback.
99 const struct sfc_sw_stat_descr sfc_sw_stats_descr[] = {
100 /* Group of Rx packets/bytes stats */
102 .name = SFC_SW_STAT_GOOD_PACKETS,
103 .type = SFC_SW_STATS_RX,
104 .get_val = sfc_sw_stat_get_rx_good_pkts_bytes,
105 .provide_total = false,
108 .name = SFC_SW_STAT_GOOD_BYTES,
109 .type = SFC_SW_STATS_RX,
111 .provide_total = false,
113 /* End of basic stats */
116 .type = SFC_SW_STATS_RX,
117 .get_val = sfc_get_sw_stat_val_rx_dbells,
118 .provide_total = true,
122 .type = SFC_SW_STATS_TX,
123 .get_val = sfc_get_sw_stat_val_tx_dbells,
124 .provide_total = true,
129 sfc_sw_stat_get_name(struct sfc_adapter *sa,
130 const struct sfc_sw_stat_descr *sw_stat, char *name,
131 size_t name_size, unsigned int id_off)
136 switch (sw_stat->type) {
137 case SFC_SW_STATS_RX:
140 case SFC_SW_STATS_TX:
144 sfc_err(sa, "%s: unknown software statistics type %d",
145 __func__, sw_stat->type);
149 if (sw_stat->provide_total && id_off == 0) {
150 ret = snprintf(name, name_size, "%s_%s", prefix,
152 if (ret < 0 || ret >= (int)name_size) {
153 sfc_err(sa, "%s: failed to fill xstat name %s_%s, err %d",
154 __func__, prefix, sw_stat->name, ret);
155 return ret > 0 ? -EINVAL : ret;
158 uint16_t qid = id_off - sw_stat->provide_total;
159 ret = snprintf(name, name_size, "%s_q%u_%s", prefix, qid,
161 if (ret < 0 || ret >= (int)name_size) {
162 sfc_err(sa, "%s: failed to fill xstat name %s_q%u_%s, err %d",
163 __func__, prefix, qid, sw_stat->name, ret);
164 return ret > 0 ? -EINVAL : ret;
172 sfc_sw_stat_get_queue_count(struct sfc_adapter *sa,
173 const struct sfc_sw_stat_descr *sw_stat)
175 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
177 switch (sw_stat->type) {
178 case SFC_SW_STATS_RX:
179 return sas->ethdev_rxq_count;
180 case SFC_SW_STATS_TX:
181 return sas->ethdev_txq_count;
183 sfc_err(sa, "%s: unknown software statistics type %d",
184 __func__, sw_stat->type);
190 sfc_sw_xstat_per_queue_get_count(const struct sfc_sw_stat_descr *sw_stat,
191 unsigned int nb_queues)
193 /* Take into account the total xstat of all queues */
194 return nb_queues > 0 ? sw_stat->provide_total + nb_queues : 0;
/* Total number of xstats entries the given software stat contributes. */
static unsigned int
sfc_sw_xstat_get_nb_supported(struct sfc_adapter *sa,
			      const struct sfc_sw_stat_descr *sw_stat)
{
	unsigned int nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);

	return sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
}
208 sfc_sw_stat_get_names(struct sfc_adapter *sa,
209 const struct sfc_sw_stat_descr *sw_stat,
210 struct rte_eth_xstat_name *xstats_names,
211 unsigned int xstats_names_sz,
212 unsigned int *nb_written,
213 unsigned int *nb_supported)
215 const size_t name_size = sizeof(xstats_names[0].name);
216 unsigned int id_base = *nb_supported;
217 unsigned int nb_queues;
221 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
224 *nb_supported += sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
227 * The order of each software xstat type is the total xstat
228 * followed by per-queue xstats.
230 if (*nb_written < xstats_names_sz && sw_stat->provide_total) {
231 rc = sfc_sw_stat_get_name(sa, sw_stat,
232 xstats_names[*nb_written].name,
233 name_size, *nb_written - id_base);
239 for (qid = 0; qid < nb_queues; ++qid) {
240 if (*nb_written < xstats_names_sz) {
241 rc = sfc_sw_stat_get_name(sa, sw_stat,
242 xstats_names[*nb_written].name,
243 name_size, *nb_written - id_base);
254 sfc_sw_xstat_get_names_by_id(struct sfc_adapter *sa,
255 const struct sfc_sw_stat_descr *sw_stat,
257 struct rte_eth_xstat_name *xstats_names,
259 unsigned int *nb_supported)
261 const size_t name_size = sizeof(xstats_names[0].name);
262 unsigned int id_base = *nb_supported;
264 unsigned int nb_queues;
268 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
271 *nb_supported += sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
274 * The order of each software xstat type is the total xstat
275 * followed by per-queue xstats.
277 id_end = id_base + sw_stat->provide_total + nb_queues;
278 for (i = 0; i < size; i++) {
279 if (id_base <= ids[i] && ids[i] < id_end) {
280 rc = sfc_sw_stat_get_name(sa, sw_stat,
281 xstats_names[i].name,
282 name_size, ids[i] - id_base);
292 sfc_sw_stat_get_val(struct sfc_adapter *sa,
293 unsigned int sw_stat_idx, uint16_t qid)
295 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
296 uint64_t *res = &sw_stats->supp[sw_stat_idx].cache[qid];
297 uint64_t values[SFC_SW_STATS_GROUP_SIZE_MAX];
298 unsigned int group_start_idx;
299 unsigned int group_size;
302 if (*res != SFC_SW_STAT_INVALID)
306 * Search for the group start, i.e. the stat that implements
307 * get value callback.
309 group_start_idx = sw_stat_idx;
310 while (sw_stats->supp[group_start_idx].descr->get_val == NULL)
314 * Calculate number of elements in the group with loop till the next
315 * group start or the list end.
318 for (i = sw_stat_idx + 1; i < sw_stats->supp_count; i++) {
319 if (sw_stats->supp[i].descr->get_val != NULL)
323 group_size += sw_stat_idx - group_start_idx;
325 SFC_ASSERT(group_size <= SFC_SW_STATS_GROUP_SIZE_MAX);
326 sw_stats->supp[group_start_idx].descr->get_val(sa, qid, values,
328 for (i = group_start_idx; i < (group_start_idx + group_size); i++)
329 sw_stats->supp[i].cache[qid] = values[i - group_start_idx];
335 sfc_sw_xstat_get_values(struct sfc_adapter *sa,
336 const struct sfc_sw_stat_descr *sw_stat,
337 unsigned int sw_stat_idx,
338 struct rte_eth_xstat *xstats,
339 unsigned int xstats_size,
340 unsigned int *nb_written,
341 unsigned int *nb_supported)
345 struct rte_eth_xstat *total_xstat;
346 bool count_total_value = false;
347 unsigned int nb_queues;
349 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
352 *nb_supported += sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
355 * The order of each software xstat type is the total xstat
356 * followed by per-queue xstats.
358 if (*nb_written < xstats_size && sw_stat->provide_total) {
359 count_total_value = true;
360 total_xstat = &xstats[*nb_written];
361 xstats[*nb_written].id = *nb_written;
362 xstats[*nb_written].value = 0;
366 for (qid = 0; qid < nb_queues; ++qid) {
367 value = sfc_sw_stat_get_val(sa, sw_stat_idx, qid);
369 if (*nb_written < xstats_size) {
370 xstats[*nb_written].id = *nb_written;
371 xstats[*nb_written].value = value;
375 if (count_total_value)
376 total_xstat->value += value;
381 sfc_sw_xstat_get_values_by_id(struct sfc_adapter *sa,
382 const struct sfc_sw_stat_descr *sw_stat,
383 unsigned int sw_stat_idx,
386 unsigned int ids_size,
387 unsigned int *nb_supported)
389 rte_spinlock_t *bmp_lock = &sa->sw_stats.queues_bitmap_lock;
390 struct rte_bitmap *bmp = sa->sw_stats.queues_bitmap;
391 unsigned int id_base = *nb_supported;
392 unsigned int id_base_q;
394 bool count_total_value = false;
395 unsigned int total_value_idx;
396 uint64_t total_value = 0;
398 unsigned int nb_queues;
401 rte_spinlock_lock(bmp_lock);
402 rte_bitmap_reset(bmp);
404 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
407 *nb_supported += sfc_sw_xstat_per_queue_get_count(sw_stat, nb_queues);
410 * The order of each software xstat type is the total xstat
411 * followed by per-queue xstats.
413 id_end = id_base + sw_stat->provide_total + nb_queues;
414 for (i = 0; i < ids_size; i++) {
415 if (id_base <= ids[i] && ids[i] < id_end) {
416 if (sw_stat->provide_total && ids[i] == id_base) {
417 /* Accumulative value */
418 count_total_value = true;
422 id_base_q = id_base + sw_stat->provide_total;
423 qid = ids[i] - id_base_q;
424 values[i] = sfc_sw_stat_get_val(sa, sw_stat_idx, qid);
425 total_value += values[i];
427 rte_bitmap_set(bmp, qid);
431 if (count_total_value) {
432 values[total_value_idx] = 0;
433 for (qid = 0; qid < nb_queues; ++qid) {
434 if (rte_bitmap_get(bmp, qid) != 0)
436 values[total_value_idx] += sfc_sw_stat_get_val(sa,
440 values[total_value_idx] += total_value;
444 rte_spinlock_unlock(bmp_lock);
448 sfc_sw_xstats_get_nb_supported(struct sfc_adapter *sa)
450 SFC_ASSERT(sfc_adapter_is_locked(sa));
451 return sa->sw_stats.xstats_count;
455 sfc_sw_stats_clear_cache(struct sfc_adapter *sa)
457 unsigned int cache_count = sa->sw_stats.cache_count;
458 uint64_t *cache = sa->sw_stats.cache;
460 RTE_BUILD_BUG_ON(UINT64_C(0xffffffffffffffff) != SFC_SW_STAT_INVALID);
461 memset(cache, 0xff, cache_count * sizeof(*cache));
465 sfc_sw_xstats_get_vals(struct sfc_adapter *sa,
466 struct rte_eth_xstat *xstats,
467 unsigned int xstats_count,
468 unsigned int *nb_written,
469 unsigned int *nb_supported)
471 uint64_t *reset_vals = sa->sw_stats.reset_vals;
472 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
473 unsigned int sw_xstats_offset;
476 sfc_adapter_lock(sa);
478 sfc_sw_stats_clear_cache(sa);
480 sw_xstats_offset = *nb_supported;
482 for (i = 0; i < sw_stats->supp_count; i++) {
483 sfc_sw_xstat_get_values(sa, sw_stats->supp[i].descr, i,
484 xstats, xstats_count, nb_written, nb_supported);
487 for (i = sw_xstats_offset; i < *nb_written; i++)
488 xstats[i].value -= reset_vals[i - sw_xstats_offset];
490 sfc_adapter_unlock(sa);
494 sfc_sw_xstats_get_names(struct sfc_adapter *sa,
495 struct rte_eth_xstat_name *xstats_names,
496 unsigned int xstats_count,
497 unsigned int *nb_written,
498 unsigned int *nb_supported)
500 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
504 sfc_adapter_lock(sa);
506 for (i = 0; i < sw_stats->supp_count; i++) {
507 ret = sfc_sw_stat_get_names(sa, sw_stats->supp[i].descr,
508 xstats_names, xstats_count,
509 nb_written, nb_supported);
511 sfc_adapter_unlock(sa);
516 sfc_adapter_unlock(sa);
522 sfc_sw_xstats_get_vals_by_id(struct sfc_adapter *sa,
526 unsigned int *nb_supported)
528 uint64_t *reset_vals = sa->sw_stats.reset_vals;
529 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
530 unsigned int sw_xstats_offset;
533 sfc_adapter_lock(sa);
535 sfc_sw_stats_clear_cache(sa);
537 sw_xstats_offset = *nb_supported;
539 for (i = 0; i < sw_stats->supp_count; i++) {
540 sfc_sw_xstat_get_values_by_id(sa, sw_stats->supp[i].descr, i,
541 ids, values, n, nb_supported);
544 for (i = 0; i < n; i++) {
545 if (sw_xstats_offset <= ids[i] && ids[i] < *nb_supported)
546 values[i] -= reset_vals[ids[i] - sw_xstats_offset];
549 sfc_adapter_unlock(sa);
553 sfc_sw_xstats_get_names_by_id(struct sfc_adapter *sa,
555 struct rte_eth_xstat_name *xstats_names,
557 unsigned int *nb_supported)
559 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
563 sfc_adapter_lock(sa);
565 for (i = 0; i < sw_stats->supp_count; i++) {
566 ret = sfc_sw_xstat_get_names_by_id(sa, sw_stats->supp[i].descr,
567 ids, xstats_names, size,
570 sfc_adapter_unlock(sa);
576 sfc_adapter_unlock(sa);
582 sfc_sw_xstat_reset(struct sfc_adapter *sa,
583 const struct sfc_sw_stat_descr *sw_stat,
584 unsigned int sw_stat_idx,
585 uint64_t *reset_vals)
587 unsigned int nb_queues;
589 uint64_t *total_xstat_reset = NULL;
591 SFC_ASSERT(sfc_adapter_is_locked(sa));
593 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_stat);
598 * The order of each software xstat type is the total xstat
599 * followed by per-queue xstats.
601 if (sw_stat->provide_total) {
602 total_xstat_reset = reset_vals;
603 *total_xstat_reset = 0;
607 for (qid = 0; qid < nb_queues; ++qid) {
608 reset_vals[qid] = sfc_sw_stat_get_val(sa, sw_stat_idx, qid);
609 if (sw_stat->provide_total)
610 *total_xstat_reset += reset_vals[qid];
615 sfc_sw_xstats_reset(struct sfc_adapter *sa)
617 uint64_t *reset_vals = sa->sw_stats.reset_vals;
618 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
621 SFC_ASSERT(sfc_adapter_is_locked(sa));
623 sfc_sw_stats_clear_cache(sa);
625 for (i = 0; i < sw_stats->supp_count; i++) {
626 sfc_sw_xstat_reset(sa, sw_stats->supp[i].descr, i, reset_vals);
627 reset_vals += sfc_sw_xstat_get_nb_supported(sa,
628 sw_stats->supp[i].descr);
633 sfc_sw_stats_is_packets_or_bytes(const char *xstat_name)
635 return strcmp(xstat_name, SFC_SW_STAT_GOOD_PACKETS) == 0 ||
636 strcmp(xstat_name, SFC_SW_STAT_GOOD_BYTES) == 0;
640 sfc_sw_stats_fill_available_descr(struct sfc_adapter *sa)
642 const struct sfc_adapter_priv *sap = &sa->priv;
643 bool have_dp_rx_stats = sap->dp_rx->features & SFC_DP_RX_FEAT_STATS;
644 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
645 const struct sfc_sw_stat_descr *sw_stat_descr;
648 sw_stats->supp_count = 0;
649 for (i = 0; i < RTE_DIM(sfc_sw_stats_descr); i++) {
650 sw_stat_descr = &sfc_sw_stats_descr[i];
651 if (!have_dp_rx_stats &&
652 sw_stat_descr->type == SFC_SW_STATS_RX &&
653 sfc_sw_stats_is_packets_or_bytes(sw_stat_descr->name))
655 sw_stats->supp[sw_stats->supp_count].descr = sw_stat_descr;
656 sw_stats->supp_count++;
661 sfc_sw_stats_set_reset_basic_stats(struct sfc_adapter *sa)
663 uint64_t *reset_vals = sa->sw_stats.reset_vals;
664 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
665 const struct sfc_sw_stat_descr *sw_stat;
668 for (i = 0; i < sw_stats->supp_count; i++) {
669 sw_stat = sw_stats->supp[i].descr;
671 switch (sw_stat->type) {
672 case SFC_SW_STATS_RX:
673 if (strcmp(sw_stat->name,
674 SFC_SW_STAT_GOOD_PACKETS) == 0)
675 sa->sw_stats.reset_rx_pkts = reset_vals;
676 else if (strcmp(sw_stat->name,
677 SFC_SW_STAT_GOOD_BYTES) == 0)
678 sa->sw_stats.reset_rx_bytes = reset_vals;
680 case SFC_SW_STATS_TX:
682 SFC_GENERIC_LOG(ERR, "Unknown SW stat type");
686 reset_vals += sfc_sw_xstat_get_nb_supported(sa, sw_stat);
693 sfc_sw_xstats_configure(struct sfc_adapter *sa)
695 uint64_t **reset_vals = &sa->sw_stats.reset_vals;
696 struct sfc_sw_stats *sw_stats = &sa->sw_stats;
697 unsigned int cache_count = 0;
698 uint64_t **cache = &sa->sw_stats.cache;
699 uint64_t *stat_cache;
700 size_t nb_supported = 0;
704 sw_stats->supp_count = RTE_DIM(sfc_sw_stats_descr);
705 if (sw_stats->supp == NULL) {
706 sw_stats->supp = rte_malloc(NULL, sw_stats->supp_count *
707 sizeof(*sw_stats->supp), 0);
708 if (sw_stats->supp == NULL)
711 for (i = 0; i < sw_stats->supp_count; i++)
712 sw_stats->supp[i].descr = &sfc_sw_stats_descr[i];
713 sfc_sw_stats_fill_available_descr(sa);
715 for (i = 0; i < sw_stats->supp_count; i++) {
716 nb_supported += sfc_sw_xstat_get_nb_supported(sa,
717 sw_stats->supp[i].descr);
718 cache_count += sfc_sw_stat_get_queue_count(sa,
719 sw_stats->supp[i].descr);
721 sa->sw_stats.xstats_count = nb_supported;
723 *reset_vals = rte_realloc(*reset_vals,
724 nb_supported * sizeof(**reset_vals), 0);
725 if (*reset_vals == NULL) {
727 goto fail_reset_vals;
730 memset(*reset_vals, 0, nb_supported * sizeof(**reset_vals));
732 *cache = rte_realloc(*cache, cache_count * sizeof(*cache), 0);
733 if (*cache == NULL) {
737 sa->sw_stats.cache_count = cache_count;
739 rc = sfc_sw_stats_set_reset_basic_stats(sa);
741 goto fail_reset_basic_stats;
743 for (i = 0; i < sw_stats->supp_count; i++) {
744 sw_stats->supp[i].cache = stat_cache;
745 stat_cache += sfc_sw_stat_get_queue_count(sa,
746 sw_stats->supp[i].descr);
751 fail_reset_basic_stats:
754 sa->sw_stats.cache_count = 0;
756 rte_free(*reset_vals);
759 sa->sw_stats.xstats_count = 0;
760 rte_free(sw_stats->supp);
761 sw_stats->supp = NULL;
762 sw_stats->supp_count = 0;
768 sfc_sw_xstats_free_queues_bitmap(struct sfc_adapter *sa)
770 rte_bitmap_free(sa->sw_stats.queues_bitmap);
771 rte_free(sa->sw_stats.queues_bitmap_mem);
775 sfc_sw_xstats_alloc_queues_bitmap(struct sfc_adapter *sa)
777 struct rte_bitmap **queues_bitmap = &sa->sw_stats.queues_bitmap;
778 void **queues_bitmap_mem = &sa->sw_stats.queues_bitmap_mem;
782 bmp_size = rte_bitmap_get_memory_footprint(RTE_MAX_QUEUES_PER_PORT);
783 *queues_bitmap_mem = NULL;
784 *queues_bitmap = NULL;
786 *queues_bitmap_mem = rte_calloc_socket("bitmap_mem", bmp_size, 1, 0,
788 if (*queues_bitmap_mem == NULL)
791 *queues_bitmap = rte_bitmap_init(RTE_MAX_QUEUES_PER_PORT,
792 *queues_bitmap_mem, bmp_size);
793 if (*queues_bitmap == NULL) {
798 rte_spinlock_init(&sa->sw_stats.queues_bitmap_lock);
802 sfc_sw_xstats_free_queues_bitmap(sa);
807 sfc_sw_xstats_init(struct sfc_adapter *sa)
809 sa->sw_stats.xstats_count = 0;
810 sa->sw_stats.supp = NULL;
811 sa->sw_stats.supp_count = 0;
812 sa->sw_stats.cache = NULL;
813 sa->sw_stats.cache_count = 0;
814 sa->sw_stats.reset_vals = NULL;
816 return sfc_sw_xstats_alloc_queues_bitmap(sa);
820 sfc_sw_xstats_close(struct sfc_adapter *sa)
822 sfc_sw_xstats_free_queues_bitmap(sa);
823 sa->sw_stats.reset_vals = NULL;
824 rte_free(sa->sw_stats.cache);
825 sa->sw_stats.cache = NULL;
826 sa->sw_stats.cache_count = 0;
827 rte_free(sa->sw_stats.reset_vals);
828 rte_free(sa->sw_stats.supp);
829 sa->sw_stats.supp = NULL;
830 sa->sw_stats.supp_count = 0;
831 sa->sw_stats.xstats_count = 0;