/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2021 Xilinx, Inc.
 */

#include <rte_bitmap.h>

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_sw_stats.h"
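/*
 * Software extended statistics are maintained by the driver itself rather
 * than read from hardware. Each statistic is reported as an accumulative
 * value for the whole port followed by per-queue values.
 */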
enum sfc_sw_stats_type {
        SFC_SW_STATS_RX,
        SFC_SW_STATS_TX,
};
typedef uint64_t sfc_get_sw_xstat_val_t(struct sfc_adapter *sa, uint16_t qid);
struct sfc_sw_xstat_descr {
        const char *name;
        enum sfc_sw_stats_type type;
        sfc_get_sw_xstat_val_t *get_val;
};
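/*
 * Per-queue value getters: report the doorbell counters accumulated by the
 * datapath. Queues that are not initialized contribute zero.
 */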
static sfc_get_sw_xstat_val_t sfc_get_sw_xstat_val_rx_dbells;
static uint64_t
sfc_get_sw_xstat_val_rx_dbells(struct sfc_adapter *sa, uint16_t qid)
{
        struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
        struct sfc_rxq_info *rxq_info;

        rxq_info = sfc_rxq_info_by_ethdev_qid(sas, qid);
        if (rxq_info->state & SFC_RXQ_INITIALIZED)
                return rxq_info->dp->dpq.rx_dbells;

        return 0;
}
static sfc_get_sw_xstat_val_t sfc_get_sw_xstat_val_tx_dbells;
static uint64_t
sfc_get_sw_xstat_val_tx_dbells(struct sfc_adapter *sa, uint16_t qid)
{
        struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
        struct sfc_txq_info *txq_info;

        txq_info = sfc_txq_info_by_ethdev_qid(sas, qid);
        if (txq_info->state & SFC_TXQ_INITIALIZED)
                return txq_info->dp->dpq.tx_dbells;

        return 0;
}
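/* Descriptors of all software xstats supported by the driver */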
struct sfc_sw_xstat_descr sfc_sw_xstats[] = {
        {
                .name = "dbells",
                .type = SFC_SW_STATS_RX,
                .get_val = sfc_get_sw_xstat_val_rx_dbells,
        },
        {
                .name = "dbells",
                .type = SFC_SW_STATS_TX,
                .get_val = sfc_get_sw_xstat_val_tx_dbells,
        },
};
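/*
 * Build an xstat name. An id_off of 0 denotes the accumulative value
 * ("rx_dbells"/"tx_dbells"); a non-zero id_off denotes queue id_off - 1
 * ("rx_q<N>_dbells"/"tx_q<N>_dbells").
 */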
static int
sfc_sw_stat_get_name(struct sfc_adapter *sa,
                     const struct sfc_sw_xstat_descr *sw_xstat, char *name,
                     size_t name_size, unsigned int id_off)
{
        const char *prefix;
        int ret;

        switch (sw_xstat->type) {
        case SFC_SW_STATS_RX:
                prefix = "rx";
                break;
        case SFC_SW_STATS_TX:
                prefix = "tx";
                break;
        default:
                sfc_err(sa, "%s: unknown software statistics type %d",
                        __func__, sw_xstat->type);
                return -EINVAL;
        }

        if (id_off == 0) {
                ret = snprintf(name, name_size, "%s_%s", prefix,
                               sw_xstat->name);
                if (ret < 0 || ret >= (int)name_size) {
                        sfc_err(sa, "%s: failed to fill xstat name %s_%s, err %d",
                                __func__, prefix, sw_xstat->name, ret);
                        return ret > 0 ? -EINVAL : ret;
                }
        } else {
                uint16_t qid = id_off - 1;

                ret = snprintf(name, name_size, "%s_q%u_%s", prefix, qid,
                               sw_xstat->name);
                if (ret < 0 || ret >= (int)name_size) {
                        sfc_err(sa, "%s: failed to fill xstat name %s_q%u_%s, err %d",
                                __func__, prefix, qid, sw_xstat->name, ret);
                        return ret > 0 ? -EINVAL : ret;
                }
        }

        return 0;
}
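/* Number of ethdev queues of the type the statistic is collected for */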
static unsigned int
sfc_sw_stat_get_queue_count(struct sfc_adapter *sa,
                            const struct sfc_sw_xstat_descr *sw_xstat)
{
        struct sfc_adapter_shared *sas = sfc_sa2shared(sa);

        switch (sw_xstat->type) {
        case SFC_SW_STATS_RX:
                return sas->ethdev_rxq_count;
        case SFC_SW_STATS_TX:
                return sas->ethdev_txq_count;
        default:
                sfc_err(sa, "%s: unknown software statistics type %d",
                        __func__, sw_xstat->type);
                return 0;
        }
}
static unsigned int
sfc_sw_xstat_per_queue_get_count(unsigned int nb_queues)
{
        /* Take into account the accumulative xstat of all queues */
        return nb_queues > 0 ? 1 + nb_queues : 0;
}
static unsigned int
sfc_sw_xstat_get_nb_supported(struct sfc_adapter *sa,
                              const struct sfc_sw_xstat_descr *sw_xstat)
{
        unsigned int nb_queues;

        nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
        return sfc_sw_xstat_per_queue_get_count(nb_queues);
}
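/*
 * Fill in names for one software xstat: the accumulative entry first, then
 * one entry per queue. *nb_supported always advances; *nb_written advances
 * only while there is room in the caller's array.
 */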
static int
sfc_sw_stat_get_names(struct sfc_adapter *sa,
                      const struct sfc_sw_xstat_descr *sw_xstat,
                      struct rte_eth_xstat_name *xstats_names,
                      unsigned int xstats_names_sz,
                      unsigned int *nb_written,
                      unsigned int *nb_supported)
{
        const size_t name_size = sizeof(xstats_names[0].name);
        unsigned int id_base = *nb_supported;
        unsigned int nb_queues;
        unsigned int qid;
        int rc;

        nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
        if (nb_queues == 0)
                return 0;
        *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);

        /*
         * The order of each software xstat type is the accumulative xstat
         * followed by per-queue xstats.
         */
        if (*nb_written < xstats_names_sz) {
                rc = sfc_sw_stat_get_name(sa, sw_xstat,
                                          xstats_names[*nb_written].name,
                                          name_size, *nb_written - id_base);
                if (rc != 0)
                        return rc;
                (*nb_written)++;
        }

        for (qid = 0; qid < nb_queues; ++qid) {
                if (*nb_written < xstats_names_sz) {
                        rc = sfc_sw_stat_get_name(sa, sw_xstat,
                                          xstats_names[*nb_written].name,
                                          name_size, *nb_written - id_base);
                        if (rc != 0)
                                return rc;
                        (*nb_written)++;
                }
        }

        return 0;
}
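/* Same as sfc_sw_stat_get_names() but only for the requested xstat IDs */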
static int
sfc_sw_xstat_get_names_by_id(struct sfc_adapter *sa,
                             const struct sfc_sw_xstat_descr *sw_xstat,
                             const uint64_t *ids,
                             struct rte_eth_xstat_name *xstats_names,
                             unsigned int size,
                             unsigned int *nb_supported)
{
        const size_t name_size = sizeof(xstats_names[0].name);
        unsigned int id_base = *nb_supported;
        unsigned int nb_queues;
        unsigned int i;
        int rc;

        nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
        if (nb_queues == 0)
                return 0;
        *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);

        /*
         * The order of each software xstat type is the accumulative xstat
         * followed by per-queue xstats.
         */
        for (i = 0; i < size; i++) {
                if (id_base <= ids[i] && ids[i] <= id_base + nb_queues) {
                        rc = sfc_sw_stat_get_name(sa, sw_xstat,
                                                  xstats_names[i].name,
                                                  name_size, ids[i] - id_base);
                        if (rc != 0)
                                return rc;
                }
        }

        return 0;
}
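/*
 * Fill in values for one software xstat: the accumulative entry is the sum
 * of all per-queue values and is reported first.
 */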
static void
sfc_sw_xstat_get_values(struct sfc_adapter *sa,
                        const struct sfc_sw_xstat_descr *sw_xstat,
                        struct rte_eth_xstat *xstats,
                        unsigned int xstats_size,
                        unsigned int *nb_written,
                        unsigned int *nb_supported)
{
        unsigned int qid;
        uint64_t value;
        struct rte_eth_xstat *accum_xstat;
        bool count_accum_value = false;
        unsigned int nb_queues;

        nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
        if (nb_queues == 0)
                return;
        *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);

        /*
         * The order of each software xstat type is the accumulative xstat
         * followed by per-queue xstats.
         */
        if (*nb_written < xstats_size) {
                count_accum_value = true;
                accum_xstat = &xstats[*nb_written];
                xstats[*nb_written].id = *nb_written;
                xstats[*nb_written].value = 0;
                (*nb_written)++;
        }

        for (qid = 0; qid < nb_queues; ++qid) {
                value = sw_xstat->get_val(sa, qid);

                if (*nb_written < xstats_size) {
                        xstats[*nb_written].id = *nb_written;
                        xstats[*nb_written].value = value;
                        (*nb_written)++;
                }

                if (count_accum_value)
                        accum_xstat->value += value;
        }
}
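/*
 * By-ID variant: a bitmap of queues (protected by its spinlock) records
 * which per-queue values were requested explicitly so that the accumulative
 * value can add the remaining queues without double counting.
 */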
static void
sfc_sw_xstat_get_values_by_id(struct sfc_adapter *sa,
                              const struct sfc_sw_xstat_descr *sw_xstat,
                              const uint64_t *ids,
                              uint64_t *values,
                              unsigned int ids_size,
                              unsigned int *nb_supported)
{
        rte_spinlock_t *bmp_lock = &sa->sw_xstats.queues_bitmap_lock;
        struct rte_bitmap *bmp = sa->sw_xstats.queues_bitmap;
        unsigned int id_base = *nb_supported;
        bool count_accum_value = false;
        unsigned int accum_value_idx;
        uint64_t accum_value = 0;
        unsigned int i, qid;
        unsigned int nb_queues;

        rte_spinlock_lock(bmp_lock);
        rte_bitmap_reset(bmp);

        nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
        if (nb_queues == 0)
                goto unlock;
        *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);

        /*
         * The order of each software xstat type is the accumulative xstat
         * followed by per-queue xstats.
         */
        for (i = 0; i < ids_size; i++) {
                if (id_base <= ids[i] && ids[i] <= (id_base + nb_queues)) {
                        if (ids[i] == id_base) { /* Accumulative value */
                                count_accum_value = true;
                                accum_value_idx = i;
                                continue;
                        }
                        qid = ids[i] - id_base - 1;
                        values[i] = sw_xstat->get_val(sa, qid);
                        accum_value += values[i];

                        rte_bitmap_set(bmp, qid);
                }
        }

        if (count_accum_value) {
                values[accum_value_idx] = 0;
                for (qid = 0; qid < nb_queues; ++qid) {
                        if (rte_bitmap_get(bmp, qid) != 0)
                                continue;
                        values[accum_value_idx] += sw_xstat->get_val(sa, qid);
                }
                values[accum_value_idx] += accum_value;
        }

unlock:
        rte_spinlock_unlock(bmp_lock);
}
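/*
 * Driver-level interface below: each function iterates over all software
 * xstat descriptors and applies the per-descriptor helpers above.
 */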
unsigned int
sfc_sw_xstats_get_nb_supported(struct sfc_adapter *sa)
{
        unsigned int nb_supported = 0;
        unsigned int i;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
                nb_supported += sfc_sw_xstat_get_nb_supported(sa,
                                                              &sfc_sw_xstats[i]);
        }

        return nb_supported;
}
void
sfc_sw_xstats_get_vals(struct sfc_adapter *sa,
                       struct rte_eth_xstat *xstats,
                       unsigned int xstats_count,
                       unsigned int *nb_written,
                       unsigned int *nb_supported)
{
        uint64_t *reset_vals = sa->sw_xstats.reset_vals;
        unsigned int sw_xstats_offset;
        unsigned int i;

        sfc_adapter_lock(sa);

        sw_xstats_offset = *nb_supported;

        for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
                sfc_sw_xstat_get_values(sa, &sfc_sw_xstats[i], xstats,
                                        xstats_count, nb_written, nb_supported);
        }

        for (i = sw_xstats_offset; i < *nb_written; i++)
                xstats[i].value -= reset_vals[i - sw_xstats_offset];

        sfc_adapter_unlock(sa);
}
int
sfc_sw_xstats_get_names(struct sfc_adapter *sa,
                        struct rte_eth_xstat_name *xstats_names,
                        unsigned int xstats_count,
                        unsigned int *nb_written,
                        unsigned int *nb_supported)
{
        unsigned int i;
        int ret;

        sfc_adapter_lock(sa);

        for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
                ret = sfc_sw_stat_get_names(sa, &sfc_sw_xstats[i],
                                            xstats_names, xstats_count,
                                            nb_written, nb_supported);
                if (ret != 0) {
                        sfc_adapter_unlock(sa);
                        return ret;
                }
        }

        sfc_adapter_unlock(sa);

        return 0;
}
void
sfc_sw_xstats_get_vals_by_id(struct sfc_adapter *sa,
                             const uint64_t *ids,
                             uint64_t *values,
                             unsigned int n,
                             unsigned int *nb_supported)
{
        uint64_t *reset_vals = sa->sw_xstats.reset_vals;
        unsigned int sw_xstats_offset;
        unsigned int i;

        sfc_adapter_lock(sa);

        sw_xstats_offset = *nb_supported;

        for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
                sfc_sw_xstat_get_values_by_id(sa, &sfc_sw_xstats[i], ids,
                                              values, n, nb_supported);
        }

        for (i = 0; i < n; i++) {
                if (sw_xstats_offset <= ids[i] && ids[i] < *nb_supported)
                        values[i] -= reset_vals[ids[i] - sw_xstats_offset];
        }

        sfc_adapter_unlock(sa);
}
int
sfc_sw_xstats_get_names_by_id(struct sfc_adapter *sa,
                              const uint64_t *ids,
                              struct rte_eth_xstat_name *xstats_names,
                              unsigned int size,
                              unsigned int *nb_supported)
{
        unsigned int i;
        int ret;

        sfc_adapter_lock(sa);

        for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
                ret = sfc_sw_xstat_get_names_by_id(sa, &sfc_sw_xstats[i], ids,
                                                   xstats_names, size,
                                                   nb_supported);
                if (ret != 0) {
                        sfc_adapter_unlock(sa);
                        return ret;
                }
        }

        sfc_adapter_unlock(sa);

        return 0;
}
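/*
 * Reset is implemented by latching the current values as a baseline in
 * reset_vals; the get functions above subtract the baseline when reporting.
 */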
static void
sfc_sw_xstat_reset(struct sfc_adapter *sa, struct sfc_sw_xstat_descr *sw_xstat,
                   uint64_t *reset_vals)
{
        unsigned int nb_queues;
        unsigned int qid;
        uint64_t *accum_xstat_reset;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
        if (nb_queues == 0)
                return;

        /*
         * The order of each software xstat type is the accumulative xstat
         * followed by per-queue xstats.
         */
        accum_xstat_reset = reset_vals;
        *accum_xstat_reset = 0;
        reset_vals++;

        for (qid = 0; qid < nb_queues; ++qid) {
                reset_vals[qid] = sw_xstat->get_val(sa, qid);
                *accum_xstat_reset += reset_vals[qid];
        }
}
void
sfc_sw_xstats_reset(struct sfc_adapter *sa)
{
        uint64_t *reset_vals = sa->sw_xstats.reset_vals;
        struct sfc_sw_xstat_descr *sw_xstat;
        unsigned int i;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
                sw_xstat = &sfc_sw_xstats[i];
                sfc_sw_xstat_reset(sa, sw_xstat, reset_vals);
                reset_vals += sfc_sw_xstat_get_nb_supported(sa, sw_xstat);
        }
}
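/* (Re)allocate the reset baseline array to match the configured queues */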
int
sfc_sw_xstats_configure(struct sfc_adapter *sa)
{
        uint64_t **reset_vals = &sa->sw_xstats.reset_vals;
        size_t nb_supported = 0;
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++)
                nb_supported += sfc_sw_xstat_get_nb_supported(sa,
                                                              &sfc_sw_xstats[i]);

        *reset_vals = rte_realloc(*reset_vals,
                                  nb_supported * sizeof(**reset_vals), 0);
        if (*reset_vals == NULL)
                return ENOMEM;

        memset(*reset_vals, 0, nb_supported * sizeof(**reset_vals));

        return 0;
}
static void
sfc_sw_xstats_free_queues_bitmap(struct sfc_adapter *sa)
{
        rte_bitmap_free(sa->sw_xstats.queues_bitmap);
        rte_free(sa->sw_xstats.queues_bitmap_mem);
}
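/*
 * The queues bitmap is sized for the maximum number of queues per port so
 * that it never needs reallocation when the device is reconfigured.
 */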
static int
sfc_sw_xstats_alloc_queues_bitmap(struct sfc_adapter *sa)
{
        struct rte_bitmap **queues_bitmap = &sa->sw_xstats.queues_bitmap;
        void **queues_bitmap_mem = &sa->sw_xstats.queues_bitmap_mem;
        uint32_t bmp_size;
        int rc;

        bmp_size = rte_bitmap_get_memory_footprint(RTE_MAX_QUEUES_PER_PORT);
        *queues_bitmap_mem = NULL;
        *queues_bitmap = NULL;

        *queues_bitmap_mem = rte_calloc_socket("bitmap_mem", bmp_size, 1, 0,
                                               sa->socket_id);
        if (*queues_bitmap_mem == NULL)
                return ENOMEM;

        *queues_bitmap = rte_bitmap_init(RTE_MAX_QUEUES_PER_PORT,
                                         *queues_bitmap_mem, bmp_size);
        if (*queues_bitmap == NULL) {
                rc = EINVAL;
                goto fail;
        }

        rte_spinlock_init(&sa->sw_xstats.queues_bitmap_lock);
        return 0;

fail:
        sfc_sw_xstats_free_queues_bitmap(sa);
        return rc;
}
int
sfc_sw_xstats_init(struct sfc_adapter *sa)
{
        sa->sw_xstats.reset_vals = NULL;

        return sfc_sw_xstats_alloc_queues_bitmap(sa);
}
void
sfc_sw_xstats_close(struct sfc_adapter *sa)
{
        rte_free(sa->sw_xstats.reset_vals);
        sa->sw_xstats.reset_vals = NULL;

        sfc_sw_xstats_free_queues_bitmap(sa);
}