/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
10 #include <rte_string_fns.h>
13 #include <rte_cycles.h>
14 #include <rte_ethdev.h>
15 #include <rte_metrics.h>
16 #include <rte_memzone.h>
17 #include <rte_lcore.h>
19 #include "rte_latencystats.h"
/** Nano seconds per second */
#define NS_PER_SEC 1E9

/** Clock cycles per nano second */
static float
latencystat_cycles_per_ns(void)
{
	/*
	 * rte_get_timer_hz() is cycles per second; dividing by NS_PER_SEC
	 * (a double literal) keeps the fractional part, so timers slower
	 * than 1 GHz still yield a usable conversion factor.
	 */
	return rte_get_timer_hz() / NS_PER_SEC;
}
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_LATENCY_STATS RTE_LOGTYPE_USER1

/* Name of the memzone that holds the shared stats (multi-process lookup key) */
static const char *MZ_RTE_LATENCY_STATS = "rte_latencystats";
/* Index returned by rte_metrics_reg_names(); used when pushing updates */
static int latency_stats_index;
/* Sampling interval converted to TSC cycles at init time */
static uint64_t samp_intvl;
/* Accumulated cycles since the last packet was timestamped */
static uint64_t timer_tsc;
/* TSC value observed on the previous Rx callback iteration */
static uint64_t prev_tsc;
40 struct rte_latency_stats {
41 float min_latency; /**< Minimum latency in nano seconds */
42 float avg_latency; /**< Average latency in nano seconds */
43 float max_latency; /**< Maximum latency in nano seconds */
44 float jitter; /** Latency variation */
45 rte_spinlock_t lock; /** Latency calculation lock */
48 static struct rte_latency_stats *glob_stats;
51 const struct rte_eth_rxtx_callback *cb;
54 static struct rxtx_cbs rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
55 static struct rxtx_cbs tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
57 struct latency_stats_nameoff {
58 char name[RTE_ETH_XSTATS_NAME_SIZE];
62 static const struct latency_stats_nameoff lat_stats_strings[] = {
63 {"min_latency_ns", offsetof(struct rte_latency_stats, min_latency)},
64 {"avg_latency_ns", offsetof(struct rte_latency_stats, avg_latency)},
65 {"max_latency_ns", offsetof(struct rte_latency_stats, max_latency)},
66 {"jitter_ns", offsetof(struct rte_latency_stats, jitter)},
69 #define NUM_LATENCY_STATS (sizeof(lat_stats_strings) / \
70 sizeof(lat_stats_strings[0]))
73 rte_latencystats_update(void)
76 float *stats_ptr = NULL;
77 uint64_t values[NUM_LATENCY_STATS] = {0};
80 for (i = 0; i < NUM_LATENCY_STATS; i++) {
81 stats_ptr = RTE_PTR_ADD(glob_stats,
82 lat_stats_strings[i].offset);
83 values[i] = (uint64_t)floor((*stats_ptr)/
84 latencystat_cycles_per_ns());
87 ret = rte_metrics_update_values(RTE_METRICS_GLOBAL,
89 values, NUM_LATENCY_STATS);
91 RTE_LOG(INFO, LATENCY_STATS, "Failed to push the stats\n");
97 rte_latencystats_fill_values(struct rte_metric_value *values)
100 float *stats_ptr = NULL;
102 for (i = 0; i < NUM_LATENCY_STATS; i++) {
103 stats_ptr = RTE_PTR_ADD(glob_stats,
104 lat_stats_strings[i].offset);
106 values[i].value = (uint64_t)floor((*stats_ptr)/
107 latencystat_cycles_per_ns());
112 add_time_stamps(uint16_t pid __rte_unused,
113 uint16_t qid __rte_unused,
114 struct rte_mbuf **pkts,
116 uint16_t max_pkts __rte_unused,
117 void *user_cb __rte_unused)
120 uint64_t diff_tsc, now;
123 * For every sample interval,
124 * time stamp is marked on one received packet.
127 for (i = 0; i < nb_pkts; i++) {
128 diff_tsc = now - prev_tsc;
129 timer_tsc += diff_tsc;
131 if ((pkts[i]->ol_flags & PKT_RX_TIMESTAMP) == 0
132 && (timer_tsc >= samp_intvl)) {
133 pkts[i]->timestamp = now;
134 pkts[i]->ol_flags |= PKT_RX_TIMESTAMP;
145 calc_latency(uint16_t pid __rte_unused,
146 uint16_t qid __rte_unused,
147 struct rte_mbuf **pkts,
149 void *_ __rte_unused)
151 unsigned int i, cnt = 0;
153 float latency[nb_pkts];
154 static float prev_latency;
156 * Alpha represents degree of weighting decrease in EWMA,
157 * a constant smoothing factor between 0 and 1. The value
158 * is used below for measuring average latency.
160 const float alpha = 0.2;
163 for (i = 0; i < nb_pkts; i++) {
164 if (pkts[i]->ol_flags & PKT_RX_TIMESTAMP)
165 latency[cnt++] = now - pkts[i]->timestamp;
168 rte_spinlock_lock(&glob_stats->lock);
169 for (i = 0; i < cnt; i++) {
171 * The jitter is calculated as statistical mean of interpacket
172 * delay variation. The "jitter estimate" is computed by taking
173 * the absolute values of the ipdv sequence and applying an
174 * exponential filter with parameter 1/16 to generate the
175 * estimate. i.e J=J+(|D(i-1,i)|-J)/16. Where J is jitter,
176 * D(i-1,i) is difference in latency of two consecutive packets
178 * Reference: Calculated as per RFC 5481, sec 4.1,
179 * RFC 3393 sec 4.5, RFC 1889 sec.
181 glob_stats->jitter += (fabsf(prev_latency - latency[i])
182 - glob_stats->jitter)/16;
183 if (glob_stats->min_latency == 0)
184 glob_stats->min_latency = latency[i];
185 else if (latency[i] < glob_stats->min_latency)
186 glob_stats->min_latency = latency[i];
187 else if (latency[i] > glob_stats->max_latency)
188 glob_stats->max_latency = latency[i];
190 * The average latency is measured using exponential moving
191 * average, i.e. using EWMA
192 * https://en.wikipedia.org/wiki/Moving_average
194 glob_stats->avg_latency +=
195 alpha * (latency[i] - glob_stats->avg_latency);
196 prev_latency = latency[i];
198 rte_spinlock_unlock(&glob_stats->lock);
204 rte_latencystats_init(uint64_t app_samp_intvl,
205 rte_latency_stats_flow_type_fn user_cb)
210 struct rxtx_cbs *cbs = NULL;
211 const char *ptr_strings[NUM_LATENCY_STATS] = {0};
212 const struct rte_memzone *mz = NULL;
213 const unsigned int flags = 0;
216 if (rte_memzone_lookup(MZ_RTE_LATENCY_STATS))
219 /** Allocate stats in shared memory fo multi process support */
220 mz = rte_memzone_reserve(MZ_RTE_LATENCY_STATS, sizeof(*glob_stats),
221 rte_socket_id(), flags);
223 RTE_LOG(ERR, LATENCY_STATS, "Cannot reserve memory: %s:%d\n",
228 glob_stats = mz->addr;
229 rte_spinlock_init(&glob_stats->lock);
230 samp_intvl = app_samp_intvl * latencystat_cycles_per_ns();
232 /** Register latency stats with stats library */
233 for (i = 0; i < NUM_LATENCY_STATS; i++)
234 ptr_strings[i] = lat_stats_strings[i].name;
236 latency_stats_index = rte_metrics_reg_names(ptr_strings,
238 if (latency_stats_index < 0) {
239 RTE_LOG(DEBUG, LATENCY_STATS,
240 "Failed to register latency stats names\n");
244 /** Register Rx/Tx callbacks */
245 RTE_ETH_FOREACH_DEV(pid) {
246 struct rte_eth_dev_info dev_info;
248 ret = rte_eth_dev_info_get(pid, &dev_info);
250 RTE_LOG(INFO, LATENCY_STATS,
251 "Error during getting device (port %u) info: %s\n",
252 pid, strerror(-ret));
257 for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
258 cbs = &rx_cbs[pid][qid];
259 cbs->cb = rte_eth_add_first_rx_callback(pid, qid,
260 add_time_stamps, user_cb);
262 RTE_LOG(INFO, LATENCY_STATS, "Failed to "
263 "register Rx callback for pid=%d, "
264 "qid=%d\n", pid, qid);
266 for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
267 cbs = &tx_cbs[pid][qid];
268 cbs->cb = rte_eth_add_tx_callback(pid, qid,
269 calc_latency, user_cb);
271 RTE_LOG(INFO, LATENCY_STATS, "Failed to "
272 "register Tx callback for pid=%d, "
273 "qid=%d\n", pid, qid);
280 rte_latencystats_uninit(void)
285 struct rxtx_cbs *cbs = NULL;
286 const struct rte_memzone *mz = NULL;
288 /** De register Rx/Tx callbacks */
289 RTE_ETH_FOREACH_DEV(pid) {
290 struct rte_eth_dev_info dev_info;
292 ret = rte_eth_dev_info_get(pid, &dev_info);
294 RTE_LOG(INFO, LATENCY_STATS,
295 "Error during getting device (port %u) info: %s\n",
296 pid, strerror(-ret));
301 for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
302 cbs = &rx_cbs[pid][qid];
303 ret = rte_eth_remove_rx_callback(pid, qid, cbs->cb);
305 RTE_LOG(INFO, LATENCY_STATS, "failed to "
306 "remove Rx callback for pid=%d, "
307 "qid=%d\n", pid, qid);
309 for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
310 cbs = &tx_cbs[pid][qid];
311 ret = rte_eth_remove_tx_callback(pid, qid, cbs->cb);
313 RTE_LOG(INFO, LATENCY_STATS, "failed to "
314 "remove Tx callback for pid=%d, "
315 "qid=%d\n", pid, qid);
319 /* free up the memzone */
320 mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
322 rte_memzone_free(mz);
328 rte_latencystats_get_names(struct rte_metric_name *names, uint16_t size)
332 if (names == NULL || size < NUM_LATENCY_STATS)
333 return NUM_LATENCY_STATS;
335 for (i = 0; i < NUM_LATENCY_STATS; i++)
336 strlcpy(names[i].name, lat_stats_strings[i].name,
337 sizeof(names[i].name));
339 return NUM_LATENCY_STATS;
343 rte_latencystats_get(struct rte_metric_value *values, uint16_t size)
345 if (size < NUM_LATENCY_STATS || values == NULL)
346 return NUM_LATENCY_STATS;
348 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
349 const struct rte_memzone *mz;
350 mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
352 RTE_LOG(ERR, LATENCY_STATS,
353 "Latency stats memzone not found\n");
356 glob_stats = mz->addr;
359 /* Retrieve latency stats */
360 rte_latencystats_fill_values(values);
362 return NUM_LATENCY_STATS;