/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <errno.h>
#include <math.h>
#include <string.h>

#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include <rte_log.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_metrics.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_errno.h>

#include "rte_latencystats.h"
/** Nanoseconds per second */
#define NS_PER_SEC 1E9

/** Clock cycles per nanosecond */
static uint64_t
latencystat_cycles_per_ns(void)
{
	return rte_get_timer_hz() / NS_PER_SEC;
}
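
/*
 * Note: NS_PER_SEC is a double, so the division above happens in
 * floating point, but the uint64_t return value truncates the result
 * to a whole number of cycles per nanosecond (e.g. a 2.3 GHz timer
 * yields 2; a timer below 1 GHz would yield 0).
 */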
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_LATENCY_STATS RTE_LOGTYPE_USER1

static uint64_t timestamp_dynflag;
static int timestamp_dynfield_offset = -1;
static inline rte_mbuf_timestamp_t *
timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
			timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}

static const char *MZ_RTE_LATENCY_STATS = "rte_latencystats";
static int latency_stats_index;
static uint64_t samp_intvl;
static uint64_t timer_tsc;
static uint64_t prev_tsc;
struct rte_latency_stats {
	float min_latency; /**< Minimum latency in nanoseconds */
	float avg_latency; /**< Average latency in nanoseconds */
	float max_latency; /**< Maximum latency in nanoseconds */
	float jitter; /**< Latency variation */
	rte_spinlock_t lock; /**< Latency calculation lock */
};

static struct rte_latency_stats *glob_stats;
struct rxtx_cbs {
	const struct rte_eth_rxtx_callback *cb;
};

static struct rxtx_cbs rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct rxtx_cbs tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
struct latency_stats_nameoff {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct latency_stats_nameoff lat_stats_strings[] = {
	{"min_latency_ns", offsetof(struct rte_latency_stats, min_latency)},
	{"avg_latency_ns", offsetof(struct rte_latency_stats, avg_latency)},
	{"max_latency_ns", offsetof(struct rte_latency_stats, max_latency)},
	{"jitter_ns", offsetof(struct rte_latency_stats, jitter)},
};

#define NUM_LATENCY_STATS (sizeof(lat_stats_strings) / \
				sizeof(lat_stats_strings[0]))
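
/*
 * The offsets stored above let rte_latencystats_update() and
 * rte_latencystats_fill_values() walk the stats generically via
 * RTE_PTR_ADD(glob_stats, offset) rather than naming each field.
 */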
int32_t
rte_latencystats_update(void)
{
	unsigned int i;
	float *stats_ptr = NULL;
	uint64_t values[NUM_LATENCY_STATS] = {0};
	int ret;

	for (i = 0; i < NUM_LATENCY_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(glob_stats,
				lat_stats_strings[i].offset);
		values[i] = (uint64_t)floor((*stats_ptr)/
				latencystat_cycles_per_ns());
	}

	ret = rte_metrics_update_values(RTE_METRICS_GLOBAL,
					latency_stats_index,
					values, NUM_LATENCY_STATS);
	if (ret < 0)
		RTE_LOG(INFO, LATENCY_STATS, "Failed to push the stats\n");

	return ret;
}
static void
rte_latencystats_fill_values(struct rte_metric_value *values)
{
	unsigned int i;
	float *stats_ptr = NULL;

	for (i = 0; i < NUM_LATENCY_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(glob_stats,
				lat_stats_strings[i].offset);
		values[i].key = i;
		values[i].value = (uint64_t)floor((*stats_ptr)/
				latencystat_cycles_per_ns());
	}
}
static uint16_t
add_time_stamps(uint16_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused,
		void *user_cb __rte_unused)
{
	unsigned int i;
	uint64_t diff_tsc, now;

	/*
	 * For every sample interval,
	 * a time stamp is marked on one received packet.
	 */
	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		diff_tsc = now - prev_tsc;
		timer_tsc += diff_tsc;
		if ((pkts[i]->ol_flags & timestamp_dynflag) == 0
				&& (timer_tsc >= samp_intvl)) {
			*timestamp_dynfield(pkts[i]) = now;
			pkts[i]->ol_flags |= timestamp_dynflag;
			timer_tsc = 0;
		}
		prev_tsc = now;
		now = rte_rdtsc();
	}

	return nb_pkts;
}
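
/*
 * Illustrative arithmetic (assumed values, not from this file): with
 * a 2 GHz timer, latencystat_cycles_per_ns() returns 2, so an
 * app_samp_intvl of 1000 ns gives samp_intvl = 2000 cycles; one
 * packet is stamped per elapsed 2000-cycle window, after which
 * timer_tsc is reset.
 */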
static uint16_t
calc_latency(uint16_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		void *_ __rte_unused)
{
	unsigned int i, cnt = 0;
	uint64_t now;
	float latency[nb_pkts];
	static float prev_latency;
	/*
	 * Alpha represents the degree of weighting decrease in EWMA,
	 * a constant smoothing factor between 0 and 1. The value
	 * is used below for measuring average latency.
	 */
	const float alpha = 0.2;

	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & timestamp_dynflag)
			latency[cnt++] = now - *timestamp_dynfield(pkts[i]);
	}
	rte_spinlock_lock(&glob_stats->lock);
	for (i = 0; i < cnt; i++) {
		/*
		 * The jitter is calculated as the statistical mean of
		 * interpacket delay variation. The "jitter estimate" is
		 * computed by taking the absolute values of the ipdv
		 * sequence and applying an exponential filter with
		 * parameter 1/16 to generate the estimate, i.e.
		 * J = J + (|D(i-1,i)| - J)/16, where J is the jitter and
		 * D(i-1,i) is the difference in latency of two consecutive
		 * packets i-1 and i.
		 * Reference: RFC 5481 sec. 4.1, RFC 3393 sec. 4.5 and
		 * RFC 1889.
		 */
		glob_stats->jitter += (fabsf(prev_latency - latency[i])
					- glob_stats->jitter)/16;
		if (glob_stats->min_latency == 0)
			glob_stats->min_latency = latency[i];
		else if (latency[i] < glob_stats->min_latency)
			glob_stats->min_latency = latency[i];
		else if (latency[i] > glob_stats->max_latency)
			glob_stats->max_latency = latency[i];
		/*
		 * The average latency is measured using an exponential
		 * moving average, i.e. an EWMA:
		 * https://en.wikipedia.org/wiki/Moving_average
		 */
		glob_stats->avg_latency +=
			alpha * (latency[i] - glob_stats->avg_latency);
		prev_latency = latency[i];
	}
	rte_spinlock_unlock(&glob_stats->lock);

	return nb_pkts;
}
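
/*
 * Worked example of the two filters above (illustrative numbers, not
 * from a real run): with alpha = 0.2, avg_latency = 100 and a new
 * sample of 150, avg_latency becomes 100 + 0.2 * (150 - 100) = 110.
 * The jitter filter is the same recurrence with a fixed 1/16 weight:
 * J = J + (|D(i-1,i)| - J)/16.
 */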
int
rte_latencystats_init(uint64_t app_samp_intvl,
		rte_latency_stats_flow_type_fn user_cb)
{
	unsigned int i;
	uint16_t pid;
	uint16_t qid;
	int ret;
	struct rxtx_cbs *cbs = NULL;
	const char *ptr_strings[NUM_LATENCY_STATS] = {0};
	const struct rte_memzone *mz = NULL;
	const unsigned int flags = 0;

	if (rte_memzone_lookup(MZ_RTE_LATENCY_STATS))
		return -EEXIST;

	/* Allocate stats in shared memory for multi-process support */
	mz = rte_memzone_reserve(MZ_RTE_LATENCY_STATS, sizeof(*glob_stats),
					rte_socket_id(), flags);
	if (mz == NULL) {
		RTE_LOG(ERR, LATENCY_STATS, "Cannot reserve memory: %s:%d\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	glob_stats = mz->addr;
	rte_spinlock_init(&glob_stats->lock);
	samp_intvl = app_samp_intvl * latencystat_cycles_per_ns();

	/* Register latency stats with stats library */
	for (i = 0; i < NUM_LATENCY_STATS; i++)
		ptr_strings[i] = lat_stats_strings[i].name;

	latency_stats_index = rte_metrics_reg_names(ptr_strings,
							NUM_LATENCY_STATS);
	if (latency_stats_index < 0) {
		RTE_LOG(DEBUG, LATENCY_STATS,
			"Failed to register latency stats names\n");
		return -1;
	}

	/* Register mbuf field and flag for Rx timestamp */
	ret = rte_mbuf_dyn_rx_timestamp_register(&timestamp_dynfield_offset,
			&timestamp_dynflag);
	if (ret != 0) {
		RTE_LOG(ERR, LATENCY_STATS,
			"Cannot register mbuf field/flag for timestamp\n");
		return -rte_errno;
	}

	/* Register Rx/Tx callbacks */
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = rte_eth_dev_info_get(pid, &dev_info);
		if (ret != 0) {
			RTE_LOG(INFO, LATENCY_STATS,
				"Error during getting device (port %u) info: %s\n",
				pid, strerror(-ret));
			continue;
		}

		for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
			cbs = &rx_cbs[pid][qid];
			cbs->cb = rte_eth_add_first_rx_callback(pid, qid,
					add_time_stamps, user_cb);
			if (!cbs->cb)
				RTE_LOG(INFO, LATENCY_STATS,
					"Failed to register Rx callback for pid=%d, qid=%d\n",
					pid, qid);
		}
		for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
			cbs = &tx_cbs[pid][qid];
			cbs->cb = rte_eth_add_tx_callback(pid, qid,
					calc_latency, user_cb);
			if (!cbs->cb)
				RTE_LOG(INFO, LATENCY_STATS,
					"Failed to register Tx callback for pid=%d, qid=%d\n",
					pid, qid);
		}
	}
	return 0;
}
int
rte_latencystats_uninit(void)
{
	uint16_t pid;
	uint16_t qid;
	int ret = 0;
	struct rxtx_cbs *cbs = NULL;
	const struct rte_memzone *mz = NULL;

	/* Deregister Rx/Tx callbacks */
	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_dev_info dev_info;

		ret = rte_eth_dev_info_get(pid, &dev_info);
		if (ret != 0) {
			RTE_LOG(INFO, LATENCY_STATS,
				"Error during getting device (port %u) info: %s\n",
				pid, strerror(-ret));
			continue;
		}

		for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
			cbs = &rx_cbs[pid][qid];
			ret = rte_eth_remove_rx_callback(pid, qid, cbs->cb);
			if (ret)
				RTE_LOG(INFO, LATENCY_STATS,
					"Failed to remove Rx callback for pid=%d, qid=%d\n",
					pid, qid);
		}
		for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
			cbs = &tx_cbs[pid][qid];
			ret = rte_eth_remove_tx_callback(pid, qid, cbs->cb);
			if (ret)
				RTE_LOG(INFO, LATENCY_STATS,
					"Failed to remove Tx callback for pid=%d, qid=%d\n",
					pid, qid);
		}
	}

	/* Free up the memzone */
	mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
	rte_memzone_free(mz);

	return 0;
}
int
rte_latencystats_get_names(struct rte_metric_name *names, uint16_t size)
{
	unsigned int i;

	if (names == NULL || size < NUM_LATENCY_STATS)
		return NUM_LATENCY_STATS;

	for (i = 0; i < NUM_LATENCY_STATS; i++)
		strlcpy(names[i].name, lat_stats_strings[i].name,
			sizeof(names[i].name));

	return NUM_LATENCY_STATS;
}
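
/*
 * A caller can size its output array with a probing call first
 * (a sketch; error handling omitted):
 *
 *	int n = rte_latencystats_get_names(NULL, 0);
 *	struct rte_metric_name *names = calloc(n, sizeof(*names));
 *	rte_latencystats_get_names(names, n);
 */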
int
rte_latencystats_get(struct rte_metric_value *values, uint16_t size)
{
	if (size < NUM_LATENCY_STATS || values == NULL)
		return NUM_LATENCY_STATS;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const struct rte_memzone *mz;

		mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
		if (mz == NULL) {
			RTE_LOG(ERR, LATENCY_STATS,
				"Latency stats memzone not found\n");
			return -ENOMEM;
		}
		glob_stats = mz->addr;
	}

	/* Retrieve latency stats */
	rte_latencystats_fill_values(values);

	return NUM_LATENCY_STATS;
}