1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2021 Xilinx, Inc.
6 #include <rte_bitmap.h>
11 #include "sfc_sw_stats.h"
13 enum sfc_sw_stats_type {
/* NOTE(review): enum body is not visible in this excerpt; the constants
 * SFC_SW_STATS_RX and SFC_SW_STATS_TX referenced below presumably live here.
 */

/*
 * Getter callback type: read the current value of one per-queue software
 * statistic for ethdev queue @qid of adapter @sa.
 */
18 typedef uint64_t sfc_get_sw_xstat_val_t(struct sfc_adapter *sa, uint16_t qid);

/*
 * Descriptor of one software xstat group: its direction/type (Rx or Tx)
 * and the callback used to fetch a per-queue value.
 */
20 struct sfc_sw_xstat_descr {
22 enum sfc_sw_stats_type type;
23 sfc_get_sw_xstat_val_t *get_val;
26 static sfc_get_sw_xstat_val_t sfc_get_sw_xstat_val_rx_dbells;
/*
 * Return the Rx doorbell counter of ethdev Rx queue @qid.
 * The datapath queue counter is read only when the queue is initialized.
 * NOTE(review): the return type line, the value returned for an
 * uninitialized queue and the closing brace are not visible in this
 * excerpt (presumably 0 is returned).
 */
28 sfc_get_sw_xstat_val_rx_dbells(struct sfc_adapter *sa, uint16_t qid)
30 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
31 struct sfc_rxq_info *rxq_info;
33 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, qid);
34 if (rxq_info->state & SFC_RXQ_INITIALIZED)
35 return rxq_info->dp->dpq.rx_dbells;
39 static sfc_get_sw_xstat_val_t sfc_get_sw_xstat_val_tx_dbells;
/*
 * Return the Tx doorbell counter of ethdev Tx queue @qid.
 * Mirrors the Rx variant above: the datapath counter is read only when
 * the queue is initialized.
 * NOTE(review): the return type line, the fallback return value and the
 * closing brace are not visible in this excerpt.
 */
41 sfc_get_sw_xstat_val_tx_dbells(struct sfc_adapter *sa, uint16_t qid)
43 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
44 struct sfc_txq_info *txq_info;
46 txq_info = sfc_txq_info_by_ethdev_qid(sas, qid);
47 if (txq_info->state & SFC_TXQ_INITIALIZED)
48 return txq_info->dp->dpq.tx_dbells;
/*
 * Table of all software xstat groups exported by the driver.
 * Each entry pairs a statistics direction with its per-queue getter
 * (Rx and Tx doorbell counters in this excerpt).
 * NOTE(review): per-entry name fields and braces are not visible here.
 */
52 struct sfc_sw_xstat_descr sfc_sw_xstats[] = {
55 .type = SFC_SW_STATS_RX,
56 .get_val = sfc_get_sw_xstat_val_rx_dbells,
60 .type = SFC_SW_STATS_TX,
61 .get_val = sfc_get_sw_xstat_val_tx_dbells,
/*
 * Build the exported name of one entry of a software xstat group into
 * @name (@name_size bytes).  @id_off selects the entry within the group:
 * the visible code formats "<prefix>_<stat>" for the accumulative entry
 * and "<prefix>_q<N>_<stat>" with qid = id_off - 1 for per-queue entries.
 * Returns 0 on success (presumably) and a negative errno when snprintf
 * fails or the result is truncated.
 * NOTE(review): the prefix derivation (switch cases mapping type to a
 * prefix string) and parts of the control flow are missing from this
 * excerpt.
 */
66 sfc_sw_stat_get_name(struct sfc_adapter *sa,
67 const struct sfc_sw_xstat_descr *sw_xstat, char *name,
68 size_t name_size, unsigned int id_off)
73 switch (sw_xstat->type) {
/* Unknown statistics type: log it; the return path is not visible here. */
81 sfc_err(sa, "%s: unknown software statistics type %d",
82 __func__, sw_xstat->type);
/* Accumulative (whole-group) entry name: "<prefix>_<stat>". */
87 ret = snprintf(name, name_size, "%s_%s", prefix,
/* ret >= name_size means truncation; map both cases to an error code. */
89 if (ret < 0 || ret >= (int)name_size) {
90 sfc_err(sa, "%s: failed to fill xstat name %s_%s, err %d",
91 __func__, prefix, sw_xstat->name, ret);
92 return ret > 0 ? -EINVAL : ret;
/* Per-queue entry: id_off 1..N maps to queue id 0..N-1. */
95 uint16_t qid = id_off - 1;
96 ret = snprintf(name, name_size, "%s_q%u_%s", prefix, qid,
98 if (ret < 0 || ret >= (int)name_size) {
99 sfc_err(sa, "%s: failed to fill xstat name %s_q%u_%s, err %d",
100 __func__, prefix, qid, sw_xstat->name, ret);
101 return ret > 0 ? -EINVAL : ret;
/*
 * Return the number of ethdev queues covered by @sw_xstat:
 * the adapter's ethdev Rx queue count for Rx stats, Tx queue count for
 * Tx stats.  An unknown type is logged as an error.
 * NOTE(review): the default-case return value is not visible in this
 * excerpt (presumably 0).
 */
109 sfc_sw_stat_get_queue_count(struct sfc_adapter *sa,
110 const struct sfc_sw_xstat_descr *sw_xstat)
112 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
114 switch (sw_xstat->type) {
115 case SFC_SW_STATS_RX:
116 return sas->ethdev_rxq_count;
117 case SFC_SW_STATS_TX:
118 return sas->ethdev_txq_count;
120 sfc_err(sa, "%s: unknown software statistics type %d",
121 __func__, sw_xstat->type);
/*
 * Number of xstat entries a group with @nb_queues queues contributes:
 * one accumulative entry plus one per queue, or nothing when there are
 * no queues.
 */
127 sfc_sw_xstat_per_queue_get_count(unsigned int nb_queues)
129 /* Take into account the accumulative xstat of all queues */
130 return nb_queues > 0 ? 1 + nb_queues : 0;
/*
 * Total number of xstat entries supported by one software xstat group:
 * its queue count expanded to "accumulative + per-queue" entries.
 */
134 sfc_sw_xstat_get_nb_supported(struct sfc_adapter *sa,
135 const struct sfc_sw_xstat_descr *sw_xstat)
137 unsigned int nb_queues;
139 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
140 return sfc_sw_xstat_per_queue_get_count(nb_queues);
/*
 * Fill @xstats_names with the names of all entries of one software xstat
 * group: the accumulative entry first, then one entry per queue.
 * @nb_written is advanced for each name actually stored (bounded by
 * @xstats_names_sz); @nb_supported is always advanced by the full group
 * size.  The name index passed to sfc_sw_stat_get_name() is the offset
 * of the entry within the group (*nb_written - id_base).
 * NOTE(review): error-return checks after sfc_sw_stat_get_name(), the
 * (*nb_written)++ increments and the final return are not visible in
 * this excerpt.
 */
144 sfc_sw_stat_get_names(struct sfc_adapter *sa,
145 const struct sfc_sw_xstat_descr *sw_xstat,
146 struct rte_eth_xstat_name *xstats_names,
147 unsigned int xstats_names_sz,
148 unsigned int *nb_written,
149 unsigned int *nb_supported)
151 const size_t name_size = sizeof(xstats_names[0].name);
152 unsigned int id_base = *nb_supported;
153 unsigned int nb_queues;
157 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
160 *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);
163 * The order of each software xstat type is the accumulative xstat
164 * followed by per-queue xstats.
/* Accumulative entry name (offset 0 within the group). */
166 if (*nb_written < xstats_names_sz) {
167 rc = sfc_sw_stat_get_name(sa, sw_xstat,
168 xstats_names[*nb_written].name,
169 name_size, *nb_written - id_base);
/* Per-queue entry names (offsets 1..nb_queues). */
175 for (qid = 0; qid < nb_queues; ++qid) {
176 if (*nb_written < xstats_names_sz) {
177 rc = sfc_sw_stat_get_name(sa, sw_xstat,
178 xstats_names[*nb_written].name,
179 name_size, *nb_written - id_base);
/*
 * By-id variant of name retrieval for one software xstat group: for each
 * requested global id in @ids that falls inside this group's id range
 * [id_base, id_base + nb_queues], store its name at the matching slot of
 * @xstats_names.  @nb_supported is advanced by the full group size.
 * NOTE(review): the @ids/@size parameter lines, the error check after
 * sfc_sw_stat_get_name() and the return are not visible in this excerpt.
 */
190 sfc_sw_xstat_get_names_by_id(struct sfc_adapter *sa,
191 const struct sfc_sw_xstat_descr *sw_xstat,
193 struct rte_eth_xstat_name *xstats_names,
195 unsigned int *nb_supported)
197 const size_t name_size = sizeof(xstats_names[0].name);
198 unsigned int id_base = *nb_supported;
199 unsigned int nb_queues;
203 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
206 *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);
209 * The order of each software xstat type is the accumulative xstat
210 * followed by per-queue xstats.
212 for (i = 0; i < size; i++) {
/* Inclusive upper bound: id_base is the accumulative entry,
 * id_base + 1 .. id_base + nb_queues are the per-queue entries. */
213 if (id_base <= ids[i] && ids[i] <= id_base + nb_queues) {
214 rc = sfc_sw_stat_get_name(sa, sw_xstat,
215 xstats_names[i].name,
216 name_size, ids[i] - id_base);
/*
 * Fill @xstats with the values of one software xstat group: the
 * accumulative entry first (initialized to 0 and summed up as per-queue
 * values are fetched), then one entry per queue.  Entries are written
 * only while *nb_written < @xstats_size; @nb_supported is always
 * advanced by the full group size.
 * NOTE(review): the (*nb_written)++ increments and surrounding braces
 * are not visible in this excerpt.
 */
226 sfc_sw_xstat_get_values(struct sfc_adapter *sa,
227 const struct sfc_sw_xstat_descr *sw_xstat,
228 struct rte_eth_xstat *xstats,
229 unsigned int xstats_size,
230 unsigned int *nb_written,
231 unsigned int *nb_supported)
235 struct rte_eth_xstat *accum_xstat;
236 bool count_accum_value = false;
237 unsigned int nb_queues;
239 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
242 *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);
245 * The order of each software xstat type is the accumulative xstat
246 * followed by per-queue xstats.
/* Reserve the accumulative slot; it is filled in as queues are read. */
248 if (*nb_written < xstats_size) {
249 count_accum_value = true;
250 accum_xstat = &xstats[*nb_written];
251 xstats[*nb_written].id = *nb_written;
252 xstats[*nb_written].value = 0;
256 for (qid = 0; qid < nb_queues; ++qid) {
257 value = sw_xstat->get_val(sa, qid);
259 if (*nb_written < xstats_size) {
260 xstats[*nb_written].id = *nb_written;
261 xstats[*nb_written].value = value;
/* Only sum into the accumulative slot when it was actually written. */
265 if (count_accum_value)
266 accum_xstat->value += value;
/*
 * By-id variant of value retrieval for one software xstat group.
 * For each requested id inside the group's range, the per-queue value is
 * fetched and also summed into @accum_value; a bitmap (guarded by the
 * adapter's queues_bitmap_lock spinlock) records which queues were read.
 * If the accumulative entry (id == id_base) was requested, values of the
 * queues NOT individually requested are fetched afterwards so that the
 * accumulative entry covers all queues, each queue being read only once.
 * NOTE(review): parameter lines for @ids/@values, accum_value_idx
 * assignment, and a `continue` that evidently belongs between the
 * rte_bitmap_get() check and the += below are missing from this excerpt.
 */
271 sfc_sw_xstat_get_values_by_id(struct sfc_adapter *sa,
272 const struct sfc_sw_xstat_descr *sw_xstat,
275 unsigned int ids_size,
276 unsigned int *nb_supported)
278 rte_spinlock_t *bmp_lock = &sa->sw_xstats.queues_bitmap_lock;
279 struct rte_bitmap *bmp = sa->sw_xstats.queues_bitmap;
280 unsigned int id_base = *nb_supported;
281 bool count_accum_value = false;
282 unsigned int accum_value_idx;
283 uint64_t accum_value = 0;
285 unsigned int nb_queues;
288 rte_spinlock_lock(bmp_lock);
289 rte_bitmap_reset(bmp);
291 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
294 *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);
297 * The order of each software xstat type is the accumulative xstat
298 * followed by per-queue xstats.
300 for (i = 0; i < ids_size; i++) {
301 if (id_base <= ids[i] && ids[i] <= (id_base + nb_queues)) {
302 if (ids[i] == id_base) { /* Accumulative value */
303 count_accum_value = true;
/* Per-queue id: fetch the value and remember the queue was read. */
307 qid = ids[i] - id_base - 1;
308 values[i] = sw_xstat->get_val(sa, qid);
309 accum_value += values[i];
311 rte_bitmap_set(bmp, qid);
/* Complete the accumulative value with queues not explicitly requested. */
315 if (count_accum_value) {
316 for (qid = 0; qid < nb_queues; ++qid) {
317 if (rte_bitmap_get(bmp, qid) != 0)
319 values[accum_value_idx] += sw_xstat->get_val(sa, qid);
321 values[accum_value_idx] += accum_value;
325 rte_spinlock_unlock(bmp_lock);
/*
 * Total number of software xstat entries across all groups in
 * sfc_sw_xstats[].  Caller must hold the adapter lock.
 */
329 sfc_sw_xstats_get_nb_supported(struct sfc_adapter *sa)
331 unsigned int nb_supported = 0;
334 SFC_ASSERT(sfc_adapter_is_locked(sa));
336 for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
337 nb_supported += sfc_sw_xstat_get_nb_supported(sa,
/*
 * Driver-level entry point: gather values of all software xstat groups
 * into @xstats, then subtract the saved reset baselines (reset_vals) so
 * the exported counters appear to start from the last reset.
 * Performed under the adapter lock.
 */
345 sfc_sw_xstats_get_vals(struct sfc_adapter *sa,
346 struct rte_eth_xstat *xstats,
347 unsigned int xstats_count,
348 unsigned int *nb_written,
349 unsigned int *nb_supported)
351 uint64_t *reset_vals = sa->sw_xstats.reset_vals;
352 unsigned int sw_xstats_offset;
355 sfc_adapter_lock(sa);
/* Remember where the software xstats start within the overall array. */
357 sw_xstats_offset = *nb_supported;
359 for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
360 sfc_sw_xstat_get_values(sa, &sfc_sw_xstats[i], xstats,
361 xstats_count, nb_written, nb_supported);
/* Apply the reset baselines to every software xstat written. */
364 for (i = sw_xstats_offset; i < *nb_written; i++)
365 xstats[i].value -= reset_vals[i - sw_xstats_offset];
367 sfc_adapter_unlock(sa);
/*
 * Driver-level entry point: gather names of all software xstat groups.
 * On a per-group failure the adapter lock is released before returning.
 * NOTE(review): the error check between the per-group call and the early
 * unlock, and the returns, are not visible in this excerpt.
 */
371 sfc_sw_xstats_get_names(struct sfc_adapter *sa,
372 struct rte_eth_xstat_name *xstats_names,
373 unsigned int xstats_count,
374 unsigned int *nb_written,
375 unsigned int *nb_supported)
380 sfc_adapter_lock(sa);
382 for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
383 ret = sfc_sw_stat_get_names(sa, &sfc_sw_xstats[i],
384 xstats_names, xstats_count,
385 nb_written, nb_supported);
/* Early-exit path: unlock before propagating the error. */
387 sfc_adapter_unlock(sa);
/*
 * Driver-level by-id entry point: gather values for the requested @ids
 * from all software xstat groups, then subtract reset baselines for ids
 * that fall inside the software-xstats range.  Performed under the
 * adapter lock.
 * NOTE(review): the @ids/@values/@n parameter lines are not visible in
 * this excerpt.
 */
398 sfc_sw_xstats_get_vals_by_id(struct sfc_adapter *sa,
402 unsigned int *nb_supported)
404 uint64_t *reset_vals = sa->sw_xstats.reset_vals;
405 unsigned int sw_xstats_offset;
408 sfc_adapter_lock(sa);
410 sw_xstats_offset = *nb_supported;
412 for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
413 sfc_sw_xstat_get_values_by_id(sa, &sfc_sw_xstats[i], ids,
414 values, n, nb_supported);
/* Apply reset baselines only to ids within the software-xstats range. */
417 for (i = 0; i < n; i++) {
418 if (sw_xstats_offset <= ids[i] && ids[i] < *nb_supported)
419 values[i] -= reset_vals[ids[i] - sw_xstats_offset];
422 sfc_adapter_unlock(sa);
/*
 * Driver-level by-id entry point: gather names for the requested ids
 * from all software xstat groups under the adapter lock; unlocks early
 * on a per-group failure.
 * NOTE(review): the @ids/@size parameter lines, the error check and the
 * returns are not visible in this excerpt.
 */
426 sfc_sw_xstats_get_names_by_id(struct sfc_adapter *sa,
428 struct rte_eth_xstat_name *xstats_names,
430 unsigned int *nb_supported)
435 sfc_adapter_lock(sa);
437 for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
438 ret = sfc_sw_xstat_get_names_by_id(sa, &sfc_sw_xstats[i], ids,
/* Early-exit path: unlock before propagating the error. */
442 sfc_adapter_unlock(sa);
/*
 * Record the current values of one software xstat group into
 * @reset_vals as the new baseline: slot 0 receives the sum of all
 * per-queue values (accumulative entry), slots 1..nb_queues receive the
 * per-queue values.  Subsequent reads subtract these baselines so the
 * counters appear reset.  Caller must hold the adapter lock.
 * NOTE(review): the advance of reset_vals past the accumulative slot
 * before the loop is not visible in this excerpt.
 */
454 sfc_sw_xstat_reset(struct sfc_adapter *sa, struct sfc_sw_xstat_descr *sw_xstat,
455 uint64_t *reset_vals)
457 unsigned int nb_queues;
459 uint64_t *accum_xstat_reset;
461 SFC_ASSERT(sfc_adapter_is_locked(sa));
463 nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
468 * The order of each software xstat type is the accumulative xstat
469 * followed by per-queue xstats.
471 accum_xstat_reset = reset_vals;
472 *accum_xstat_reset = 0;
475 for (qid = 0; qid < nb_queues; ++qid) {
476 reset_vals[qid] = sw_xstat->get_val(sa, qid);
477 *accum_xstat_reset += reset_vals[qid];
/*
 * Reset all software xstat groups: snapshot each group's current values
 * as its baseline, walking the shared reset_vals array group by group.
 * Caller must hold the adapter lock.
 */
482 sfc_sw_xstats_reset(struct sfc_adapter *sa)
484 uint64_t *reset_vals = sa->sw_xstats.reset_vals;
485 struct sfc_sw_xstat_descr *sw_xstat;
488 SFC_ASSERT(sfc_adapter_is_locked(sa));
490 for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
491 sw_xstat = &sfc_sw_xstats[i];
492 sfc_sw_xstat_reset(sa, sw_xstat, reset_vals);
/* Advance past this group's slice of the baseline array. */
493 reset_vals += sfc_sw_xstat_get_nb_supported(sa, sw_xstat);
/*
 * (Re)size the reset-baselines array to hold one slot per supported
 * software xstat entry and zero it.
 * NOTE(review): `*reset_vals = rte_realloc(*reset_vals, ...)` overwrites
 * the stored pointer directly; if rte_realloc() fails the old allocation
 * would be leaked.  Whether that is acceptable depends on the error path
 * not visible in this excerpt (it presumably returns ENOMEM and tears
 * down) — worth confirming against the full source.
 */
498 sfc_sw_xstats_configure(struct sfc_adapter *sa)
500 uint64_t **reset_vals = &sa->sw_xstats.reset_vals;
501 size_t nb_supported = 0;
504 for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++)
505 nb_supported += sfc_sw_xstat_get_nb_supported(sa,
508 *reset_vals = rte_realloc(*reset_vals,
509 nb_supported * sizeof(**reset_vals), 0);
510 if (*reset_vals == NULL)
/* Fresh baselines: all zeros until the first explicit reset. */
513 memset(*reset_vals, 0, nb_supported * sizeof(**reset_vals));
/*
 * Release the queues bitmap and its backing memory allocated by
 * sfc_sw_xstats_alloc_queues_bitmap().
 */
519 sfc_sw_xstats_free_queues_bitmap(struct sfc_adapter *sa)
521 rte_bitmap_free(sa->sw_xstats.queues_bitmap);
522 rte_free(sa->sw_xstats.queues_bitmap_mem);
/*
 * Allocate the bitmap (sized for RTE_MAX_QUEUES_PER_PORT queues) used by
 * sfc_sw_xstat_get_values_by_id() to track which queues were already
 * read, plus its spinlock.  On failure the partially allocated resources
 * are released via sfc_sw_xstats_free_queues_bitmap().
 * NOTE(review): error-return statements and the goto to the cleanup
 * label are not visible in this excerpt.
 */
526 sfc_sw_xstats_alloc_queues_bitmap(struct sfc_adapter *sa)
528 struct rte_bitmap **queues_bitmap = &sa->sw_xstats.queues_bitmap;
529 void **queues_bitmap_mem = &sa->sw_xstats.queues_bitmap_mem;
533 bmp_size = rte_bitmap_get_memory_footprint(RTE_MAX_QUEUES_PER_PORT);
534 *queues_bitmap_mem = NULL;
535 *queues_bitmap = NULL;
537 *queues_bitmap_mem = rte_calloc_socket("bitmap_mem", bmp_size, 1, 0,
539 if (*queues_bitmap_mem == NULL)
542 *queues_bitmap = rte_bitmap_init(RTE_MAX_QUEUES_PER_PORT,
543 *queues_bitmap_mem, bmp_size);
544 if (*queues_bitmap == NULL) {
549 rte_spinlock_init(&sa->sw_xstats.queues_bitmap_lock);
/* Error path: undo partial allocations. */
553 sfc_sw_xstats_free_queues_bitmap(sa);
/*
 * Initialize software-xstats state: no baselines yet (reset_vals NULL,
 * allocated later by sfc_sw_xstats_configure()); allocate the queues
 * bitmap and return its status.
 */
558 sfc_sw_xstats_init(struct sfc_adapter *sa)
560 sa->sw_xstats.reset_vals = NULL;
562 return sfc_sw_xstats_alloc_queues_bitmap(sa);
/*
 * Tear down software-xstats state: free the reset baselines (pointer
 * cleared to avoid a dangling reference) and the queues bitmap.
 */
566 sfc_sw_xstats_close(struct sfc_adapter *sa)
568 rte_free(sa->sw_xstats.reset_vals);
569 sa->sw_xstats.reset_vals = NULL;
571 sfc_sw_xstats_free_queues_bitmap(sa);