/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include "sw_evdev.h"
#include "iq_ring.h"
#include "event_ring.h"

enum xstats_type {
	/* common stats */
	rx, tx, dropped, inflight, calls, credits,
	/* device instance specific */
	no_iq_enq, no_cq_enq,
	/* port specific */
	rx_used, rx_free, tx_used, tx_free, pkt_cycles,
	poll_return, /* for zero-count and used also for port bucket loop */
	/* qid specific */
	iq_size, iq_used,
	/* qid port mapping specific */
	pinned,
	pkts, /* note: qid-to-port pkts */
};

typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
		uint16_t obj_idx, /* port or queue id */
		enum xstats_type stat, int extra_arg);
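
/*
 * Each exposed statistic is described by one table entry: a getter
 * function plus the object index, stat type and extra argument to invoke
 * it with. This lets the get and reset paths treat every stat uniformly.
 */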

struct sw_xstats_entry {
	struct rte_event_dev_xstats_name name;
	xstats_fn fn;
	uint16_t obj_idx;
	enum xstats_type stat;
	enum rte_event_dev_xstats_mode mode;
	int extra_arg;
	uint8_t reset_allowed; /* when set, this value can be reset */
	uint64_t reset_value; /* an offset to be taken away to emulate resets */
};
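
/*
 * Resets are emulated rather than performed: a reset advances reset_value
 * to the current raw reading, and every later read subtracts it. The
 * underlying scheduler counters themselves are never cleared.
 */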

static uint64_t
get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
		enum xstats_type type, int extra_arg __rte_unused)
{
	switch (type) {
	case rx: return sw->stats.rx_pkts;
	case tx: return sw->stats.tx_pkts;
	case dropped: return sw->stats.rx_dropped;
	case calls: return sw->sched_called;
	case no_iq_enq: return sw->sched_no_iq_enqueues;
	case no_cq_enq: return sw->sched_no_cq_enqueues;
	default: return -1;
	}
}

static uint64_t
get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg __rte_unused)
{
	const struct sw_port *p = &sw->ports[obj_idx];

	switch (type) {
	case rx: return p->stats.rx_pkts;
	case tx: return p->stats.tx_pkts;
	case dropped: return p->stats.rx_dropped;
	case inflight: return p->inflights;
	case pkt_cycles: return p->avg_pkt_ticks;
	case calls: return p->total_polls;
	case credits: return p->inflight_credits;
	case poll_return: return p->zero_polls;
	case rx_used: return qe_ring_count(p->rx_worker_ring);
	case rx_free: return qe_ring_free_count(p->rx_worker_ring);
	case tx_used: return qe_ring_count(p->cq_worker_ring);
	case tx_free: return qe_ring_free_count(p->cq_worker_ring);
	default: return -1;
	}
}
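
/*
 * The ring used/free values above are instantaneous gauges rather than
 * cumulative counters, so they are marked non-resettable in
 * port_reset_allowed[] in sw_xstats_init() below.
 */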

static uint64_t
get_port_bucket_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg)
{
	const struct sw_port *p = &sw->ports[obj_idx];

	switch (type) {
	case poll_return: return p->poll_buckets[extra_arg];
	default: return -1;
	}
}

static uint64_t
get_qid_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg __rte_unused)
{
	const struct sw_qid *qid = &sw->qids[obj_idx];

	switch (type) {
	case rx: return qid->stats.rx_pkts;
	case tx: return qid->stats.tx_pkts;
	case dropped: return qid->stats.rx_dropped;
	case inflight: {
		/* recompute by summing the per-flow pinned event counts */
		uint64_t infl = 0;
		unsigned int i;
		for (i = 0; i < RTE_DIM(qid->fids); i++)
			infl += qid->fids[i].pcount;
		return infl;
	}
	case iq_size: return RTE_DIM(qid->iq[0]->ring);
	default: return -1;
	}
}

static uint64_t
get_qid_iq_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg)
{
	const struct sw_qid *qid = &sw->qids[obj_idx];
	const int iq_idx = extra_arg;

	switch (type) {
	case iq_used: return iq_ring_count(qid->iq[iq_idx]);
	default: return -1;
	}
}

static uint64_t
get_qid_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg)
{
	const struct sw_qid *qid = &sw->qids[obj_idx];
	const uint16_t port = extra_arg;

	switch (type) {
	case pinned: {
		uint64_t pin = 0;
		unsigned int i;
		for (i = 0; i < RTE_DIM(qid->fids); i++)
			if (qid->fids[i].cq == port)
				pin++;
		return pin;
	}
	case pkts:
		return qid->to_port[port];
	default: return -1;
	}
}
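
/*
 * pinned_flows is recomputed by scanning the whole fid table on each
 * read, i.e. O(RTE_DIM(qid->fids)); acceptable because xstats retrieval
 * is a slow-path operation.
 */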

int
sw_xstats_init(struct sw_evdev *sw)
{
	/*
	 * Define the stats names and types. Used to build up the device
	 * xstats array.
	 * There are multiple sets of stats:
	 *   - device-level
	 *   - per-port
	 *   - per-port-dequeue-burst-sizes
	 *   - per-qid
	 *   - per-iq
	 *   - per-qid-per-port
	 *
	 * For each of these sets, we have three parallel arrays, one for the
	 * names, the other for the stat type parameter to be passed in the fn
	 * call to get that stat. The third array allows resetting or not.
	 * All these arrays must be kept in sync.
	 */
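
	/*
	 * The names built below follow the snprintf() formats used in this
	 * function, e.g. "dev_rx", "port_2_tx", "qid_0_iq_1_used" and
	 * "qid_0_port_1_pinned_flows". Burst buckets are named by range,
	 * e.g. "port_2_dequeues_returning_1-4" (the width depends on
	 * SW_DEQ_STAT_BUCKET_SHIFT; 1-4 assumes a shift of 2).
	 */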

	static const char * const dev_stats[] = { "rx", "tx", "drop",
			"sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
	};
	static const enum xstats_type dev_types[] = { rx, tx, dropped,
			calls, no_iq_enq, no_cq_enq,
	};
	/* all device stats are allowed to be reset */

	static const char * const port_stats[] = {"rx", "tx", "drop",
			"inflight", "avg_pkt_cycles", "credits",
			"rx_ring_used", "rx_ring_free",
			"cq_ring_used", "cq_ring_free",
			"dequeue_calls", "dequeues_returning_0",
	};
	static const enum xstats_type port_types[] = { rx, tx, dropped,
			inflight, pkt_cycles, credits,
			rx_used, rx_free, tx_used, tx_free,
			calls, poll_return,
	};
	static const uint8_t port_reset_allowed[] = {1, 1, 1,
			0, 1, 0,
			0, 0, 0, 0,
			1, 1,
	};

	static const char * const port_bucket_stats[] = {
			"dequeues_returning" };
	static const enum xstats_type port_bucket_types[] = { poll_return };
	/* all bucket dequeues are allowed to be reset, handled in loop below */

	static const char * const qid_stats[] = {"rx", "tx", "drop",
			"inflight", "iq_size",
	};
	static const enum xstats_type qid_types[] = { rx, tx, dropped,
			inflight, iq_size,
	};
	static const uint8_t qid_reset_allowed[] = {1, 1, 1,
			0, 0,
	};

	static const char * const qid_iq_stats[] = { "used" };
	static const enum xstats_type qid_iq_types[] = { iq_used };
	/* the iq occupancy gauge is not resettable */

	static const char * const qid_port_stats[] = { "pinned_flows",
			"packets",
	};
	static const enum xstats_type qid_port_types[] = { pinned, pkts };
	static const uint8_t qid_port_reset_allowed[] = {0, 1};
	/* ---- end of stat definitions ---- */

	/* check sizes, since a missed comma can lead to strings being
	 * joined by the compiler.
	 */
	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_iq_stats) != RTE_DIM(qid_iq_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_port_stats) != RTE_DIM(qid_port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_bucket_stats) !=
			RTE_DIM(port_bucket_types));

	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));
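
	/*
	 * Example of the hazard checked above: if the comma after "tx" were
	 * missing, C string-literal concatenation would silently turn
	 * {"rx", "tx" "drop"} into the two-element array {"rx", "txdrop"},
	 * and the length comparison against the types array would fail at
	 * compile time instead of misnaming stats at runtime.
	 */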

	/* other vars */
	const uint32_t cons_bkt_shift =
		(MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT);
	const unsigned int count = RTE_DIM(dev_stats) +
			sw->port_count * RTE_DIM(port_stats) +
			sw->port_count * RTE_DIM(port_bucket_stats) *
				(cons_bkt_shift + 1) +
			sw->qid_count * RTE_DIM(qid_stats) +
			sw->qid_count * SW_IQS_MAX * RTE_DIM(qid_iq_stats) +
			sw->qid_count * sw->port_count *
				RTE_DIM(qid_port_stats);
	unsigned int i, port, qid, iq, bkt, stat = 0;

	/* count sizes the bucket stats for the deepest possible consumer
	 * ring (MAX_SW_CONS_Q_DEPTH); the per-port loop below uses each
	 * port's actual cq ring size, so count is an upper bound on stat.
	 */
	sw->xstats = rte_zmalloc_socket(NULL, sizeof(sw->xstats[0]) * count, 0,
			sw->data->socket_id);
	if (sw->xstats == NULL)
		return -ENOMEM;
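
	/* sname aliases the name field of the xstats entry currently being
	 * filled in; it is #undef'd once all entries are populated.
	 */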
#define sname sw->xstats[stat].name.name
	for (i = 0; i < RTE_DIM(dev_stats); i++, stat++) {
		sw->xstats[stat] = (struct sw_xstats_entry){
			.fn = get_dev_stat,
			.stat = dev_types[i],
			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
			.reset_allowed = 1,
		};
		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
	}
	sw->xstats_count_mode_dev = stat;

	for (port = 0; port < sw->port_count; port++) {
		sw->xstats_offset_for_port[port] = stat;

		uint32_t count_offset = stat;

		for (i = 0; i < RTE_DIM(port_stats); i++, stat++) {
			sw->xstats[stat] = (struct sw_xstats_entry){
				.fn = get_port_stat,
				.obj_idx = port,
				.stat = port_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_PORT,
				.reset_allowed = port_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "port_%u_%s",
					port, port_stats[i]);
		}

		for (bkt = 0; bkt < (sw->ports[port].cq_worker_ring->size >>
				SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
			for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
				sw->xstats[stat] = (struct sw_xstats_entry){
					.fn = get_port_bucket_stat,
					.obj_idx = port,
					.stat = port_bucket_types[i],
					.mode = RTE_EVENT_DEV_XSTATS_PORT,
					.extra_arg = bkt,
					.reset_allowed = 1,
				};
				snprintf(sname, sizeof(sname),
					"port_%u_%s_%u-%u",
					port, port_bucket_stats[i],
					(bkt << SW_DEQ_STAT_BUCKET_SHIFT) + 1,
					(bkt + 1) << SW_DEQ_STAT_BUCKET_SHIFT);
				stat++;
			}
		}

		sw->xstats_count_per_port[port] = stat - count_offset;
	}

	sw->xstats_count_mode_port = stat - sw->xstats_count_mode_dev;

	for (qid = 0; qid < sw->qid_count; qid++) {
		uint32_t count_offset = stat;
		sw->xstats_offset_for_qid[qid] = stat;

		for (i = 0; i < RTE_DIM(qid_stats); i++, stat++) {
			sw->xstats[stat] = (struct sw_xstats_entry){
				.fn = get_qid_stat,
				.obj_idx = qid,
				.stat = qid_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
				.reset_allowed = qid_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "qid_%u_%s",
					qid, qid_stats[i]);
		}

		for (iq = 0; iq < SW_IQS_MAX; iq++)
			for (i = 0; i < RTE_DIM(qid_iq_stats); i++, stat++) {
				sw->xstats[stat] = (struct sw_xstats_entry){
					.fn = get_qid_iq_stat,
					.obj_idx = qid,
					.stat = qid_iq_types[i],
					.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
					.extra_arg = iq,
					.reset_allowed = 0,
				};
				snprintf(sname, sizeof(sname),
						"qid_%u_iq_%u_%s",
						qid, iq,
						qid_iq_stats[i]);
			}

		for (port = 0; port < sw->port_count; port++)
			for (i = 0; i < RTE_DIM(qid_port_stats); i++, stat++) {
				sw->xstats[stat] = (struct sw_xstats_entry){
					.fn = get_qid_port_stat,
					.obj_idx = qid,
					.stat = qid_port_types[i],
					.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
					.extra_arg = port,
					.reset_allowed =
						qid_port_reset_allowed[i],
				};
				snprintf(sname, sizeof(sname),
						"qid_%u_port_%u_%s",
						qid, port,
						qid_port_stats[i]);
			}

		sw->xstats_count_per_qid[qid] = stat - count_offset;
	}

	sw->xstats_count_mode_queue = stat -
		(sw->xstats_count_mode_dev + sw->xstats_count_mode_port);
#undef sname

	sw->xstats_count = stat;

	return stat;
}
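
/*
 * Illustrative sketch only (not part of this driver): applications read
 * these stats through the public eventdev API, which lands in
 * sw_xstats_get_names()/sw_xstats_get() below via the PMD ops. Assuming
 * dev_id 0, device-level stats and no error handling:
 *
 *	struct rte_event_dev_xstats_name names[64];
 *	unsigned int ids[64];
 *	uint64_t values[64];
 *	int n = rte_event_dev_xstats_names_get(0,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, 64);
 *	rte_event_dev_xstats_get(0, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *			ids, values, n);
 *	for (int i = 0; i < n; i++)
 *		printf("%s: %"PRIu64"\n", names[i].name, values[i]);
 */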

int
sw_xstats_uninit(struct sw_evdev *sw)
{
	rte_free(sw->xstats);
	sw->xstats_count = 0;
	return 0;
}

int
sw_xstats_get_names(const struct rte_eventdev *dev,
	enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
	struct rte_event_dev_xstats_name *xstats_names,
	unsigned int *ids, unsigned int size)
{
	const struct sw_evdev *sw = sw_pmd_priv_const(dev);
	unsigned int i;
	unsigned int xidx = 0;
	RTE_SET_USED(queue_port_id);

	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = sw->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= (signed int)sw->port_count)
			break;
		xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
		start_offset = sw->xstats_offset_for_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)sw->qid_count)
			break;
		xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
		start_offset = sw->xstats_offset_for_qid[queue_port_id];
		break;
	default:
		SW_LOG_ERR("Invalid mode received in sw_xstats_get_names()\n");
		return -EINVAL;
	}

	/* if the caller's buffer is too small, or no output arrays were
	 * given, report the required size instead of filling anything in
	 */
	if (xstats_mode_count > size || !ids || !xstats_names)
		return xstats_mode_count;

	for (i = 0; i < sw->xstats_count && xidx < size; i++) {
		if (sw->xstats[i].mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
				queue_port_id != sw->xstats[i].obj_idx)
			continue;

		xstats_names[xidx] = sw->xstats[i].name;
		ids[xidx] = start_offset + xidx;
		xidx++;
	}
	return xidx;
}

static int
sw_xstats_update(struct sw_evdev *sw, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n, const uint32_t reset,
		const uint32_t ret_if_n_lt_nstats)
{
	unsigned int i;
	unsigned int xidx = 0;
	RTE_SET_USED(queue_port_id);

	uint32_t xstats_mode_count = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = sw->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= (signed int)sw->port_count)
			return -EINVAL;
		xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)sw->qid_count)
			return -EINVAL;
		xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
		break;
	default:
		SW_LOG_ERR("Invalid mode received in sw_xstats_get()\n");
		return -EINVAL;
	}

	/* this function can check num stats and return them (xstats_get()
	 * style behaviour) or ignore n for reset() of a single stat style
	 * behaviour.
	 */
	if (ret_if_n_lt_nstats && xstats_mode_count > n)
		return xstats_mode_count;

	for (i = 0; i < n && xidx < xstats_mode_count; i++) {
		/* validate the id before dereferencing the entry */
		if (ids[i] >= sw->xstats_count)
			continue;

		struct sw_xstats_entry *xs = &sw->xstats[ids[i]];
		if (xs->mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
				queue_port_id != xs->obj_idx)
			continue;

		uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat,
					xs->extra_arg) - xs->reset_value;

		if (values)
			values[xidx] = val;

		/* advance the baseline so the stat reads as zero next time */
		if (xs->reset_allowed && reset)
			xs->reset_value += val;

		xidx++;
	}

	return xidx;
}
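
/*
 * Both the read path (sw_xstats_get()) and the by-id reset helpers below
 * funnel through sw_xstats_update(): with reset=0 it only reports values,
 * with reset=1 it also advances reset_value so the stat reads as zero
 * afterwards.
 */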

int
sw_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	const uint32_t reset = 0;
	const uint32_t ret_n_lt_stats = 1;
	return sw_xstats_update(sw, mode, queue_port_id, ids, values, n,
			reset, ret_n_lt_stats);
}

uint64_t
sw_xstats_get_by_name(const struct rte_eventdev *dev,
		const char *name, unsigned int *id)
{
	const struct sw_evdev *sw = sw_pmd_priv_const(dev);
	unsigned int i;

	for (i = 0; i < sw->xstats_count; i++) {
		struct sw_xstats_entry *xs = &sw->xstats[i];
		if (strncmp(xs->name.name, name,
				RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
			if (id != NULL)
				*id = i;
			return xs->fn(sw, xs->obj_idx, xs->stat,
					xs->extra_arg) - xs->reset_value;
		}
	}
	if (id != NULL)
		*id = (uint32_t)-ENOTSUP;
	return (uint64_t)-ENOTSUP;
}
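
/*
 * Name lookup is a linear scan over the whole xstats table. That is fine
 * for occasional diagnostics, but callers polling repeatedly should cache
 * the id written to *id and use the by-id get path instead.
 */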

static void
sw_xstats_reset_range(struct sw_evdev *sw, uint32_t start, uint32_t num)
{
	uint32_t i;
	for (i = start; i < start + num; i++) {
		struct sw_xstats_entry *xs = &sw->xstats[i];
		if (!xs->reset_allowed)
			continue;

		uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat,
					xs->extra_arg) - xs->reset_value;
		/* add the outstanding delta to the baseline, making the
		 * stat read as zero from this point on
		 */
		xs->reset_value += val;
	}
}

static int
sw_xstats_reset_queue(struct sw_evdev *sw, uint8_t queue_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;
	const uint32_t ret_n_lt_stats = 0;
	if (ids) {
		uint32_t nb_reset = sw_xstats_update(sw,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					queue_id, ids, NULL, nb_ids,
					reset, ret_n_lt_stats);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	sw_xstats_reset_range(sw, sw->xstats_offset_for_qid[queue_id],
			sw->xstats_count_per_qid[queue_id]);
	return 0;
}

static int
sw_xstats_reset_port(struct sw_evdev *sw, uint8_t port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;
	const uint32_t ret_n_lt_stats = 0;
	int offset = sw->xstats_offset_for_port[port_id];
	int nb_stat = sw->xstats_count_per_port[port_id];

	if (ids) {
		uint32_t nb_reset = sw_xstats_update(sw,
					RTE_EVENT_DEV_XSTATS_PORT, port_id,
					ids, NULL, nb_ids,
					reset, ret_n_lt_stats);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	sw_xstats_reset_range(sw, offset, nb_stat);
	return 0;
}

static int
sw_xstats_reset_dev(struct sw_evdev *sw, const uint32_t ids[], uint32_t nb_ids)
{
	uint32_t i;
	if (ids) {
		for (i = 0; i < nb_ids; i++) {
			uint32_t id = ids[i];
			if (id >= sw->xstats_count_mode_dev)
				return -EINVAL;
			sw_xstats_reset_range(sw, id, 1);
		}
	} else {
		for (i = 0; i < sw->xstats_count_mode_dev; i++)
			sw_xstats_reset_range(sw, i, 1);
	}
	return 0;
}
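
/*
 * Device-level stats occupy ids [0, xstats_count_mode_dev) at the start
 * of the xstats table, which is why sw_xstats_reset_dev() can validate
 * ids directly against that bound.
 */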

int
sw_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	uint32_t i;
	int err;

	/* handle -1 for queue_port_id here, looping over all ports/queues */
	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		sw_xstats_reset_dev(sw, ids, nb_ids);
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id == -1) {
			for (i = 0; i < sw->port_count; i++) {
				err = sw_xstats_reset_port(sw, i, ids,
						nb_ids);
				if (err)
					return -EINVAL;
			}
		} else if (queue_port_id < (int16_t)sw->port_count)
			sw_xstats_reset_port(sw, queue_port_id, ids, nb_ids);
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id == -1) {
			for (i = 0; i < sw->qid_count; i++) {
				err = sw_xstats_reset_queue(sw, i, ids,
						nb_ids);
				if (err)
					return -EINVAL;
			}
		} else if (queue_port_id < (int16_t)sw->qid_count)
			sw_xstats_reset_queue(sw, queue_port_id, ids, nb_ids);
		break;
	}

	return 0;
}