/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "sw_evdev.h"
#include "iq_ring.h"
#include "event_ring.h"
/* stat types reported by this PMD; every get_*_stat() switch below keys off
 * these enumerators.
 */
enum xstats_type {
	/* common stats */
	rx, tx, dropped, inflight, calls, credits,
	/* device instance specific */
	no_iq_enq, no_cq_enq,
	/* port specific */
	rx_used, /* tracks memory use per port */
	rx_free, tx_used, tx_free, pkt_cycles,
	poll_return, /* for zero-count and used also for port bucket loop */
	/* qid specific */
	iq_size, iq_used,
	/* qid port mapping specific */
	pinned,
};
typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
		uint16_t obj_idx, /* port or queue id */
		enum xstats_type stat, int extra_arg);
struct sw_xstats_entry {
	struct rte_event_dev_xstats_name name;
	xstats_fn fn;
	uint16_t obj_idx; /* port or queue this stat belongs to */
	enum xstats_type stat;
	enum rte_event_dev_xstats_mode mode;
	int extra_arg; /* passed through to fn, e.g. bucket or iq index */
	uint8_t reset_allowed; /* when set, this value can be reset */
	uint64_t reset_value; /* an offset to be taken away to emulate resets */
};
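/* Worked example of the reset_value offset (illustrative numbers only): if a
 * raw counter reads 100 when the stat is reset, reset_value becomes 100; when
 * the raw counter later reads 130, the reported value is 130 - 100 = 30, as
 * if the counter had actually been zeroed.
 */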
static uint64_t
get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
		enum xstats_type type, int extra_arg __rte_unused)
{
	switch (type) {
	case rx: return sw->stats.rx_pkts;
	case tx: return sw->stats.tx_pkts;
	case dropped: return sw->stats.rx_dropped;
	case calls: return sw->sched_called;
	case no_iq_enq: return sw->sched_no_iq_enqueues;
	case no_cq_enq: return sw->sched_no_cq_enqueues;
	default: return -1;
	}
}
static uint64_t
get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg __rte_unused)
{
	const struct sw_port *p = &sw->ports[obj_idx];

	switch (type) {
	case rx: return p->stats.rx_pkts;
	case tx: return p->stats.tx_pkts;
	case dropped: return p->stats.rx_dropped;
	case inflight: return p->inflights;
	case pkt_cycles: return p->avg_pkt_ticks;
	case calls: return p->total_polls;
	case credits: return p->inflight_credits;
	case poll_return: return p->zero_polls;
	case rx_used: return qe_ring_count(p->rx_worker_ring);
	case rx_free: return qe_ring_free_count(p->rx_worker_ring);
	case tx_used: return qe_ring_count(p->cq_worker_ring);
	case tx_free: return qe_ring_free_count(p->cq_worker_ring);
	default: return -1;
	}
}
static uint64_t
get_port_bucket_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg)
{
	const struct sw_port *p = &sw->ports[obj_idx];

	switch (type) {
	case poll_return: return p->poll_buckets[extra_arg];
	default: return -1;
	}
}
static uint64_t
get_qid_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg __rte_unused)
{
	const struct sw_qid *qid = &sw->qids[obj_idx];

	switch (type) {
	case rx: return qid->stats.rx_pkts;
	case tx: return qid->stats.tx_pkts;
	case dropped: return qid->stats.rx_dropped;
	case inflight: {
		/* sum the per-flow pinned counts to get total inflights */
		uint64_t infl = 0;
		unsigned int i;
		for (i = 0; i < RTE_DIM(qid->fids); i++)
			infl += qid->fids[i].pcount;
		return infl;
	}
	case iq_size: return RTE_DIM(qid->iq[0]->ring);
	default: return -1;
	}
}
static uint64_t
get_qid_iq_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg)
{
	const struct sw_qid *qid = &sw->qids[obj_idx];
	const int iq_idx = extra_arg;

	switch (type) {
	case iq_used: return iq_ring_count(qid->iq[iq_idx]);
	default: return -1;
	}
}
static uint64_t
get_qid_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg)
{
	const struct sw_qid *qid = &sw->qids[obj_idx];
	uint16_t port = extra_arg;

	switch (type) {
	case pinned: {
		/* count the flows currently pinned to this port's cq */
		uint64_t pin = 0;
		unsigned int i;
		for (i = 0; i < RTE_DIM(qid->fids); i++)
			if (qid->fids[i].cq == port)
				pin++;
		return pin;
	}
	default: return -1;
	}
}
int
sw_xstats_init(struct sw_evdev *sw)
{
	/*
	 * Define the stats names and types. Used to build up the device
	 * xstats array.
	 * There are multiple sets of stats:
	 *   - device-level,
	 *   - per-port,
	 *   - per-port-dequeue-burst-sizes,
	 *   - per-qid,
	 *   - per-iq,
	 *   - per-port-per-qid.
	 *
	 * For each of these sets, we have three parallel arrays, one for the
	 * names, the other for the stat type parameter to be passed in the fn
	 * call to get that stat. The third array allows resetting or not.
	 * All these arrays must be kept in sync.
	 */
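	/* For illustration, the snprintf() calls further down expand these
	 * definitions into flat names such as "dev_rx", "port_2_tx",
	 * "qid_0_iq_1_used" or "qid_0_port_3_pinned_flows" (the indices here
	 * are examples only).
	 */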
	static const char * const dev_stats[] = { "rx", "tx", "drop",
			"sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
	};
	static const enum xstats_type dev_types[] = { rx, tx, dropped,
			calls, no_iq_enq, no_cq_enq,
	};
	/* all device stats are allowed to be reset */
	static const char * const port_stats[] = {"rx", "tx", "drop",
			"inflight", "avg_pkt_cycles", "credits",
			"rx_ring_used", "rx_ring_free",
			"cq_ring_used", "cq_ring_free",
			"dequeue_calls", "dequeues_returning_0",
	};
	static const enum xstats_type port_types[] = { rx, tx, dropped,
			inflight, pkt_cycles, credits,
			rx_used, rx_free, tx_used, tx_free,
			calls, poll_return,
	};
	static const uint8_t port_reset_allowed[] = {1, 1, 1,
			0, 1, 0,
			0, 0, 0, 0,
			1, 1,
	};
	static const char * const port_bucket_stats[] = {
			"dequeues_returning" };
	static const enum xstats_type port_bucket_types[] = { poll_return };
	/* all bucket dequeues are allowed to be reset, handled in loop below */
	static const char * const qid_stats[] = {"rx", "tx", "drop",
			"inflight", "iq_size",
	};
	static const enum xstats_type qid_types[] = { rx, tx, dropped,
			inflight, iq_size,
	};
	static const uint8_t qid_reset_allowed[] = {1, 1, 1,
			0, 0,
	};
	static const char * const qid_iq_stats[] = { "used" };
	static const enum xstats_type qid_iq_types[] = { iq_used };

	static const char * const qid_port_stats[] = { "pinned_flows" };
	static const enum xstats_type qid_port_types[] = { pinned };
	/* ---- end of stat definitions ---- */
	/* check sizes, since a missed comma can lead to strings being
	 * joined by the compiler.
	 */
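	/* e.g. a missing comma in { "rx" "tx" } silently concatenates the two
	 * adjacent literals into the single string "rxtx", shrinking the name
	 * array by one entry; the build-time size checks below catch that.
	 */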
	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_iq_stats) != RTE_DIM(qid_iq_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_port_stats) != RTE_DIM(qid_port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_bucket_stats) !=
			RTE_DIM(port_bucket_types));

	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));
	/* size the xstats array for the worst case number of entries */
	const uint32_t cons_bkt_shift =
			(MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT);
	const unsigned int count = RTE_DIM(dev_stats) +
			sw->port_count * RTE_DIM(port_stats) +
			sw->port_count * RTE_DIM(port_bucket_stats) *
					(cons_bkt_shift + 1) +
			sw->qid_count * RTE_DIM(qid_stats) +
			sw->qid_count * SW_IQS_MAX * RTE_DIM(qid_iq_stats) +
			sw->qid_count * sw->port_count *
					RTE_DIM(qid_port_stats);
	unsigned int i, port, qid, iq, bkt, stat = 0;
	sw->xstats = rte_zmalloc_socket(NULL, sizeof(sw->xstats[0]) * count, 0,
			sw->data->socket_id);
	if (sw->xstats == NULL)
		return -ENOMEM;
	/* shorthand for the name field of the entry being filled in */
#define sname sw->xstats[stat].name.name
	for (i = 0; i < RTE_DIM(dev_stats); i++, stat++) {
		sw->xstats[stat] = (struct sw_xstats_entry){
			.fn = get_dev_stat,
			.stat = dev_types[i],
			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
			.reset_allowed = 1,
		};
		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
	}
	sw->xstats_count_mode_dev = stat;
	for (port = 0; port < sw->port_count; port++) {
		sw->xstats_offset_for_port[port] = stat;

		uint32_t count_offset = stat;

		for (i = 0; i < RTE_DIM(port_stats); i++, stat++) {
			sw->xstats[stat] = (struct sw_xstats_entry){
				.fn = get_port_stat,
				.obj_idx = port,
				.stat = port_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_PORT,
				.reset_allowed = port_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "port_%u_%s",
					port, port_stats[i]);
		}
		for (bkt = 0; bkt < (sw->ports[port].cq_worker_ring->size >>
				SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
			for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
				sw->xstats[stat] = (struct sw_xstats_entry){
					.fn = get_port_bucket_stat,
					.obj_idx = port,
					.stat = port_bucket_types[i],
					.mode = RTE_EVENT_DEV_XSTATS_PORT,
					.extra_arg = bkt,
					.reset_allowed = 1,
				};
				snprintf(sname, sizeof(sname),
					"port_%u_%s_%u-%u",
					port, port_bucket_stats[i],
					(bkt << SW_DEQ_STAT_BUCKET_SHIFT) + 1,
					(bkt + 1) << SW_DEQ_STAT_BUCKET_SHIFT);
				stat++;
			}
		}
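		/* Each bucket stat therefore reports a dequeue burst-size
		 * range; e.g. with SW_DEQ_STAT_BUCKET_SHIFT == 2 (an example
		 * value), bucket 0 of port 0 would be named
		 * "port_0_dequeues_returning_1-4".
		 */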
		sw->xstats_count_per_port[port] = stat - count_offset;
	}

	sw->xstats_count_mode_port = stat - sw->xstats_count_mode_dev;
	for (qid = 0; qid < sw->qid_count; qid++) {
		uint32_t count_offset = stat;
		sw->xstats_offset_for_qid[qid] = stat;

		for (i = 0; i < RTE_DIM(qid_stats); i++, stat++) {
			sw->xstats[stat] = (struct sw_xstats_entry){
				.fn = get_qid_stat,
				.obj_idx = qid,
				.stat = qid_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
				.reset_allowed = qid_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "qid_%u_%s",
					qid, qid_stats[i]);
		}
		for (iq = 0; iq < SW_IQS_MAX; iq++)
			for (i = 0; i < RTE_DIM(qid_iq_stats); i++, stat++) {
				sw->xstats[stat] = (struct sw_xstats_entry){
					.fn = get_qid_iq_stat,
					.obj_idx = qid,
					.stat = qid_iq_types[i],
					.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
					.extra_arg = iq,
				};
				snprintf(sname, sizeof(sname),
						"qid_%u_iq_%u_%s",
						qid, iq, qid_iq_stats[i]);
			}
		for (port = 0; port < sw->port_count; port++)
			for (i = 0; i < RTE_DIM(qid_port_stats); i++, stat++) {
				sw->xstats[stat] = (struct sw_xstats_entry){
					.fn = get_qid_port_stat,
					.obj_idx = qid,
					.stat = qid_port_types[i],
					.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
					.extra_arg = port,
				};
				snprintf(sname, sizeof(sname),
						"qid_%u_port_%u_%s",
						qid, port,
						qid_port_stats[i]);
			}

		sw->xstats_count_per_qid[qid] = stat - count_offset;
	}
	sw->xstats_count_mode_queue = stat -
		(sw->xstats_count_mode_dev + sw->xstats_count_mode_port);
#undef sname

	sw->xstats_count = stat;

	return stat;
}
int
sw_xstats_uninit(struct sw_evdev *sw)
{
	rte_free(sw->xstats);
	sw->xstats_count = 0;
	return 0;
}
int
sw_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	const struct sw_evdev *sw = sw_pmd_priv_const(dev);
	unsigned int i;
	unsigned int xidx = 0;
	RTE_SET_USED(queue_port_id);

	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;
	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = sw->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= (signed int)sw->port_count)
			break;
		xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
		start_offset = sw->xstats_offset_for_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)sw->qid_count)
			break;
		xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
		start_offset = sw->xstats_offset_for_qid[queue_port_id];
		break;
	default:
		SW_LOG_ERR("Invalid mode received in sw_xstats_get_names()\n");
		return -EINVAL;
	};

	if (xstats_mode_count > size || !ids || !xstats_names)
		return xstats_mode_count;
	for (i = 0; i < sw->xstats_count && xidx < size; i++) {
		if (sw->xstats[i].mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
				queue_port_id != sw->xstats[i].obj_idx)
			continue;

		xstats_names[xidx] = sw->xstats[i].name;
		ids[xidx] = start_offset + xidx;
		xidx++;
	}

	return xidx;
}
static int
sw_xstats_update(struct sw_evdev *sw, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n, const uint32_t reset,
		const uint32_t ret_if_n_lt_nstats)
{
	unsigned int i;
	unsigned int xidx = 0;
	RTE_SET_USED(queue_port_id);

	uint32_t xstats_mode_count = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = sw->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= (signed int)sw->port_count)
			return -EINVAL;
		xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)sw->qid_count)
			return -EINVAL;
		xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
		break;
	default:
		SW_LOG_ERR("Invalid mode received in sw_xstats_get()\n");
		return -EINVAL;
	};
	/* this function can check num stats and return them (xstats_get()
	 * style behaviour) or ignore n for reset() of a single stat style
	 * behaviour.
	 */
	if (ret_if_n_lt_nstats && xstats_mode_count > n)
		return xstats_mode_count;

	for (i = 0; i < n && xidx < xstats_mode_count; i++) {
		struct sw_xstats_entry *xs;

		/* validate the id before dereferencing the entry */
		if (ids[i] >= sw->xstats_count)
			continue;
		xs = &sw->xstats[ids[i]];
		if (xs->mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
				queue_port_id != xs->obj_idx)
			continue;

		uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
					- xs->reset_value;

		if (values)
			values[xidx] = val;

		/* accumulate, so the offset always equals the raw counter */
		if (xs->reset_allowed && reset)
			xs->reset_value += val;

		xidx++;
	}

	return xidx;
}
int
sw_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	const uint32_t reset = 0;
	const uint32_t ret_n_lt_stats = 1;
	return sw_xstats_update(sw, mode, queue_port_id, ids, values, n,
			reset, ret_n_lt_stats);
}
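/* A minimal sketch of how an application reaches these callbacks through the
 * public eventdev API (dev_id 0 and the 16-entry arrays are illustrative
 * assumptions, not part of this driver):
 *
 *	struct rte_event_dev_xstats_name names[16];
 *	unsigned int ids[16];
 *	uint64_t values[16];
 *	int n = rte_event_dev_xstats_names_get(0,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, 16);
 *	if (n > 0 && n <= 16) {
 *		rte_event_dev_xstats_get(0, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				ids, values, n);
 *		for (int i = 0; i < n; i++)
 *			printf("%s: %"PRIu64"\n", names[i].name, values[i]);
 *	}
 */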
uint64_t
sw_xstats_get_by_name(const struct rte_eventdev *dev,
		const char *name, unsigned int *id)
{
	const struct sw_evdev *sw = sw_pmd_priv_const(dev);
	unsigned int i;

	for (i = 0; i < sw->xstats_count; i++) {
		struct sw_xstats_entry *xs = &sw->xstats[i];
		if (strncmp(xs->name.name, name,
				RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
			if (id != NULL)
				*id = i;
			return xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
					- xs->reset_value;
		}
	}
	if (id != NULL)
		*id = (uint32_t)-1;
	return (uint64_t)-1;
}
static void
sw_xstats_reset_range(struct sw_evdev *sw, uint32_t start, uint32_t num)
{
	uint32_t i;
	for (i = start; i < start + num; i++) {
		struct sw_xstats_entry *xs = &sw->xstats[i];
		if (!xs->reset_allowed)
			continue;

		/* snapshot the raw counter so future reads report deltas */
		uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat,
					xs->extra_arg);
		xs->reset_value = val;
	}
}
static int
sw_xstats_reset_queue(struct sw_evdev *sw, uint8_t queue_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;
	const uint32_t ret_n_lt_stats = 0;
	if (ids) {
		uint32_t nb_reset = sw_xstats_update(sw,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					queue_id, ids, NULL, nb_ids,
					reset, ret_n_lt_stats);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	sw_xstats_reset_range(sw, sw->xstats_offset_for_qid[queue_id],
			sw->xstats_count_per_qid[queue_id]);

	return 0;
}
static int
sw_xstats_reset_port(struct sw_evdev *sw, uint8_t port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;
	const uint32_t ret_n_lt_stats = 0;
	int offset = sw->xstats_offset_for_port[port_id];
	int nb_stat = sw->xstats_count_per_port[port_id];

	if (ids) {
		uint32_t nb_reset = sw_xstats_update(sw,
					RTE_EVENT_DEV_XSTATS_PORT, port_id,
					ids, NULL, nb_ids,
					reset, ret_n_lt_stats);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	sw_xstats_reset_range(sw, offset, nb_stat);
	return 0;
}
static int
sw_xstats_reset_dev(struct sw_evdev *sw, const uint32_t ids[], uint32_t nb_ids)
{
	uint32_t i;
	if (ids) {
		for (i = 0; i < nb_ids; i++) {
			uint32_t id = ids[i];
			if (id >= sw->xstats_count_mode_dev)
				return -EINVAL;
			sw_xstats_reset_range(sw, id, 1);
		}
	} else {
		for (i = 0; i < sw->xstats_count_mode_dev; i++)
			sw_xstats_reset_range(sw, i, 1);
	}

	return 0;
}
int
sw_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	uint32_t i, err;

	/* handle -1 for queue_port_id here, looping over all ports/queues */
	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		sw_xstats_reset_dev(sw, ids, nb_ids);
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id == -1) {
			for (i = 0; i < sw->port_count; i++) {
				err = sw_xstats_reset_port(sw, i, ids, nb_ids);
				if (err)
					return -EINVAL;
			}
		} else if (queue_port_id < (int16_t)sw->port_count)
			sw_xstats_reset_port(sw, queue_port_id, ids, nb_ids);
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id == -1) {
			for (i = 0; i < sw->qid_count; i++) {
				err = sw_xstats_reset_queue(sw, i, ids, nb_ids);
				if (err)
					return -EINVAL;
			}
		} else if (queue_port_id < (int16_t)sw->qid_count)
			sw_xstats_reset_queue(sw, queue_port_id, ids, nb_ids);
		break;
	}

	return 0;
}
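/* Illustrative only: an application resets every port-level stat via the
 * public API by passing -1 as queue_port_id and NULL ids (assuming the device
 * is dev_id 0):
 *
 *	rte_event_dev_xstats_reset(0, RTE_EVENT_DEV_XSTATS_PORT, -1, NULL, 0);
 */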