/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>
#include <sys/types.h>
#include <stdbool.h>
#include <math.h>

#include <rte_mbuf.h>
#include <rte_log.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_metrics.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_timer.h>

#include "rte_latencystats.h"
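
/*
 * Illustrative usage sketch (not part of the library): an application
 * initializes the library once, periodically pushes the computed stats to
 * the metrics library, and removes the callbacks on shutdown. "samp_ns"
 * and "running" are hypothetical application variables.
 *
 *	uint64_t samp_ns = 1000;	// sampling interval in nanoseconds
 *
 *	if (rte_latencystats_init(samp_ns, NULL) != 0)
 *		rte_exit(EXIT_FAILURE, "latencystats init failed\n");
 *	while (running)
 *		rte_latencystats_update();
 *	rte_latencystats_uninit();
 */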

/** Nanoseconds per second */
#define NS_PER_SEC 1E9

/** Clock cycles per nanosecond */
static uint64_t
latencystat_cycles_per_ns(void)
{
	return rte_get_timer_hz() / NS_PER_SEC;
}
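
/*
 * Note: NS_PER_SEC is a double, so the division above happens in floating
 * point and the quotient is truncated on conversion back to uint64_t; a
 * 2.3 GHz timer therefore yields 2 cycles/ns here, not 2.3.
 */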

/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_LATENCY_STATS RTE_LOGTYPE_USER1

static const char *MZ_RTE_LATENCY_STATS = "rte_latencystats";
static int latency_stats_index; /**< Index returned by the metrics library */
static uint64_t samp_intvl; /**< Sampling interval in TSC cycles */
static uint64_t timer_tsc;  /**< Cycles accumulated since the last sample */
static uint64_t prev_tsc;   /**< TSC value seen by the previous Rx burst */

/*
 * Latency figures are accumulated in TSC cycles and converted to
 * nanoseconds when they are read out or pushed to the metrics library.
 */
struct rte_latency_stats {
	float min_latency; /**< Minimum latency */
	float avg_latency; /**< Average latency */
	float max_latency; /**< Maximum latency */
	float jitter; /**< Latency variation */
};

static struct rte_latency_stats *glob_stats;

struct rxtx_cbs {
	struct rte_eth_rxtx_callback *cb;
};

static struct rxtx_cbs rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct rxtx_cbs tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];

struct latency_stats_nameoff {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct latency_stats_nameoff lat_stats_strings[] = {
	{"min_latency_ns", offsetof(struct rte_latency_stats, min_latency)},
	{"avg_latency_ns", offsetof(struct rte_latency_stats, avg_latency)},
	{"max_latency_ns", offsetof(struct rte_latency_stats, max_latency)},
	{"jitter_ns", offsetof(struct rte_latency_stats, jitter)},
};

#define NUM_LATENCY_STATS (sizeof(lat_stats_strings) / \
				sizeof(lat_stats_strings[0]))
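
/*
 * NUM_LATENCY_STATS currently evaluates to 4; the table above keeps the
 * exported metric names and the struct rte_latency_stats layout in sync,
 * which is why the update paths below walk the fields by offset.
 */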

int32_t
rte_latencystats_update(void)
{
	unsigned int i;
	float *stats_ptr = NULL;
	uint64_t values[NUM_LATENCY_STATS] = {0};
	int ret;

	for (i = 0; i < NUM_LATENCY_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(glob_stats,
				lat_stats_strings[i].offset);
		/* Convert from TSC cycles to nanoseconds before exporting */
		values[i] = (uint64_t)floor((*stats_ptr)/
				latencystat_cycles_per_ns());
	}

	ret = rte_metrics_update_values(RTE_METRICS_GLOBAL,
					latency_stats_index,
					values, NUM_LATENCY_STATS);
	if (ret < 0)
		RTE_LOG(INFO, LATENCY_STATS, "Failed to push the stats\n");

	return ret;
}

static void
rte_latencystats_fill_values(struct rte_metric_value *values)
{
	unsigned int i;
	float *stats_ptr = NULL;

	for (i = 0; i < NUM_LATENCY_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(glob_stats,
				lat_stats_strings[i].offset);
		values[i].key = i;
		values[i].value = (uint64_t)floor((*stats_ptr)/
						latencystat_cycles_per_ns());
	}
}

static uint16_t
add_time_stamps(uint8_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused,
		void *user_cb __rte_unused)
{
	unsigned int i;
	uint64_t diff_tsc, now;

	/*
	 * For every sample interval,
	 * a time stamp is marked on one received packet.
	 */
	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		diff_tsc = now - prev_tsc;
		timer_tsc += diff_tsc;
		if (timer_tsc >= samp_intvl) {
			pkts[i]->timestamp = now;
			timer_tsc = 0;
		}
		prev_tsc = now;
		now = rte_rdtsc();
	}

	return nb_pkts;
}
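
/*
 * Worked example of the sampling above (illustrative numbers): with a 2 GHz
 * TSC and app_samp_intvl = 1000 ns, samp_intvl is 2000 cycles, so at most one
 * packet per microsecond of elapsed time gets its timestamp set; only those
 * sampled packets contribute to the latency figures on the Tx side.
 */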

static uint16_t
calc_latency(uint8_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		void *_ __rte_unused)
{
	unsigned int i, cnt = 0;
	uint64_t now;
	float latency[nb_pkts];
	static float prev_latency;
	/*
	 * Alpha represents the degree of weighting decrease in EWMA,
	 * a constant smoothing factor between 0 and 1. The value
	 * is used below for measuring the average latency.
	 */
	const float alpha = 0.2;

	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->timestamp)
			latency[cnt++] = now - pkts[i]->timestamp;
	}

	for (i = 0; i < cnt; i++) {
		/*
		 * Jitter is calculated as the statistical mean of interpacket
		 * delay variation. The "jitter estimate" is computed by taking
		 * the absolute values of the ipdv sequence and applying an
		 * exponential filter with parameter 1/16 to generate the
		 * estimate, i.e. J = J + (|D(i-1,i)| - J)/16, where J is the
		 * jitter and D(i-1,i) is the difference in latency of two
		 * consecutive packets.
		 * Reference: RFC 5481 sec 4.1, RFC 3393 sec 4.5 and RFC 1889.
		 */
		glob_stats->jitter += (fabsf(prev_latency - latency[i])
					- glob_stats->jitter)/16;
		if (glob_stats->min_latency == 0)
			glob_stats->min_latency = latency[i];
		else if (latency[i] < glob_stats->min_latency)
			glob_stats->min_latency = latency[i];
		else if (latency[i] > glob_stats->max_latency)
			glob_stats->max_latency = latency[i];
		/*
		 * The average latency is measured using an exponential moving
		 * average, i.e. EWMA:
		 * https://en.wikipedia.org/wiki/Moving_average
		 */
		glob_stats->avg_latency +=
			alpha * (latency[i] - glob_stats->avg_latency);
		prev_latency = latency[i];
	}

	return nb_pkts;
}
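
/*
 * Worked EWMA example (illustrative numbers): with avg_latency = 1000 cycles,
 * a new sample of 2000 cycles and alpha = 0.2, the average moves to
 * 1000 + 0.2 * (2000 - 1000) = 1200 cycles. The jitter filter is the same
 * idea with a fixed weight of 1/16: with J = 100 and |D| = 260, J becomes
 * 100 + (260 - 100)/16 = 110.
 */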

int
rte_latencystats_init(uint64_t app_samp_intvl,
		rte_latency_stats_flow_type_fn user_cb)
{
	unsigned int i;
	uint8_t pid;
	uint16_t qid;
	struct rxtx_cbs *cbs = NULL;
	const uint8_t nb_ports = rte_eth_dev_count();
	const char *ptr_strings[NUM_LATENCY_STATS] = {0};
	const struct rte_memzone *mz = NULL;
	const unsigned int flags = 0;

	if (rte_memzone_lookup(MZ_RTE_LATENCY_STATS))
		return -EEXIST;

	/* Allocate stats in shared memory for multi-process support */
	mz = rte_memzone_reserve(MZ_RTE_LATENCY_STATS, sizeof(*glob_stats),
					rte_socket_id(), flags);
	if (mz == NULL) {
		RTE_LOG(ERR, LATENCY_STATS, "Cannot reserve memory: %s:%d\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	glob_stats = mz->addr;
	samp_intvl = app_samp_intvl * latencystat_cycles_per_ns();

	/* Register latency stats with the metrics library */
	for (i = 0; i < NUM_LATENCY_STATS; i++)
		ptr_strings[i] = lat_stats_strings[i].name;

	latency_stats_index = rte_metrics_reg_names(ptr_strings,
							NUM_LATENCY_STATS);
	if (latency_stats_index < 0) {
		RTE_LOG(DEBUG, LATENCY_STATS,
			"Failed to register latency stats names\n");
		return -1;
	}

	/* Register Rx/Tx callbacks on all ports and queues */
	for (pid = 0; pid < nb_ports; pid++) {
		struct rte_eth_dev_info dev_info;

		rte_eth_dev_info_get(pid, &dev_info);
		for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
			cbs = &rx_cbs[pid][qid];
			cbs->cb = rte_eth_add_first_rx_callback(pid, qid,
					add_time_stamps, user_cb);
			if (!cbs->cb)
				RTE_LOG(INFO, LATENCY_STATS, "Failed to "
					"register Rx callback for pid=%d, "
					"qid=%d\n", pid, qid);
		}
		for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
			cbs = &tx_cbs[pid][qid];
			cbs->cb = rte_eth_add_tx_callback(pid, qid,
					calc_latency, user_cb);
			if (!cbs->cb)
				RTE_LOG(INFO, LATENCY_STATS, "Failed to "
					"register Tx callback for pid=%d, "
					"qid=%d\n", pid, qid);
		}
	}
	return 0;
}

int
rte_latencystats_uninit(void)
{
	uint8_t pid;
	uint16_t qid;
	int ret = 0;
	struct rxtx_cbs *cbs = NULL;
	const uint8_t nb_ports = rte_eth_dev_count();

	/* Deregister the Rx/Tx callbacks */
	for (pid = 0; pid < nb_ports; pid++) {
		struct rte_eth_dev_info dev_info;

		rte_eth_dev_info_get(pid, &dev_info);
		for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
			cbs = &rx_cbs[pid][qid];
			ret = rte_eth_remove_rx_callback(pid, qid, cbs->cb);
			if (ret)
				RTE_LOG(INFO, LATENCY_STATS, "failed to "
					"remove Rx callback for pid=%d, "
					"qid=%d\n", pid, qid);
		}
		for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
			cbs = &tx_cbs[pid][qid];
			ret = rte_eth_remove_tx_callback(pid, qid, cbs->cb);
			if (ret)
				RTE_LOG(INFO, LATENCY_STATS, "failed to "
					"remove Tx callback for pid=%d, "
					"qid=%d\n", pid, qid);
		}
	}

	return 0;
}

int
rte_latencystats_get_names(struct rte_metric_name *names, uint16_t size)
{
	unsigned int i;

	/* If the buffer is too small, report the required size */
	if (names == NULL || size < NUM_LATENCY_STATS)
		return NUM_LATENCY_STATS;

	for (i = 0; i < NUM_LATENCY_STATS; i++)
		snprintf(names[i].name, sizeof(names[i].name),
				"%s", lat_stats_strings[i].name);

	return NUM_LATENCY_STATS;
}

int
rte_latencystats_get(struct rte_metric_value *values, uint16_t size)
{
	if (size < NUM_LATENCY_STATS || values == NULL)
		return NUM_LATENCY_STATS;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const struct rte_memzone *mz;

		mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
		if (mz == NULL) {
			RTE_LOG(ERR, LATENCY_STATS,
				"Latency stats memzone not found\n");
			return -ENOMEM;
		}
		glob_stats = mz->addr;
	}

	/* Retrieve latency stats */
	rte_latencystats_fill_values(values);

	return NUM_LATENCY_STATS;
}
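
/*
 * Multi-process note: because the stats live in a named memzone, a secondary
 * process only needs to call rte_latencystats_get(); the memzone lookup above
 * attaches it to the statistics maintained by the primary. Illustrative use:
 *
 *	struct rte_metric_value vals[NUM_LATENCY_STATS];
 *
 *	if (rte_latencystats_get(vals, NUM_LATENCY_STATS) < 0)
 *		printf("latency stats memzone not found\n");
 */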