test mbuf attach
[dpdk.git] / lib / librte_latencystats / rte_latencystats.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <unistd.h>
6 #include <sys/types.h>
7 #include <stdbool.h>
8 #include <math.h>
9
10 #include <rte_string_fns.h>
11 #include <rte_mbuf.h>
12 #include <rte_log.h>
13 #include <rte_cycles.h>
14 #include <rte_ethdev.h>
15 #include <rte_metrics.h>
16 #include <rte_memzone.h>
17 #include <rte_lcore.h>
18
19 #include "rte_latencystats.h"
20
/** Nano seconds per second */
#define NS_PER_SEC 1E9

/**
 * Clock cycles per nano second.
 *
 * The ratio is rounded to nearest and clamped to a minimum of 1:
 * plain truncation skews every reported latency (e.g. a 2.5 GHz TSC
 * truncates to 2, a ~25% error), and a timer slower than 1 GHz would
 * return 0, causing a division by zero in the callers that convert
 * cycles to nano seconds.
 */
static uint64_t
latencystat_cycles_per_ns(void)
{
	const uint64_t ns_per_sec = (uint64_t)NS_PER_SEC;
	uint64_t cycles;

	/* Round to nearest instead of truncating toward zero. */
	cycles = (rte_get_timer_hz() + ns_per_sec / 2) / ns_per_sec;

	/* Never return 0: callers use this value as a divisor. */
	return cycles != 0 ? cycles : 1;
}
30
31 /* Macros for printing using RTE_LOG */
32 #define RTE_LOGTYPE_LATENCY_STATS RTE_LOGTYPE_USER1
33
34 static const char *MZ_RTE_LATENCY_STATS = "rte_latencystats";
35 static int latency_stats_index;
36 static uint64_t samp_intvl;
37 static uint64_t timer_tsc;
38 static uint64_t prev_tsc;
39
/**
 * Shared latency statistics, placed in a memzone so secondary
 * processes can read them (see rte_latencystats_get()).
 *
 * Values are accumulated in TSC cycles by calc_latency() and only
 * converted to nano seconds when reported.  The fields are addressed
 * via offsetof() in lat_stats_strings, so their order must not change.
 */
struct rte_latency_stats {
	float min_latency; /**< Minimum latency (TSC cycles; reported as ns) */
	float avg_latency; /**< EWMA average latency (TSC cycles; reported as ns) */
	float max_latency; /**< Maximum latency (TSC cycles; reported as ns) */
	float jitter; /**< Inter-packet delay variation estimate (RFC 3393 style) */
	rte_spinlock_t lock; /**< Serializes updates of the fields above */
};
47
48 static struct rte_latency_stats *glob_stats;
49
/** Handle of one Rx or Tx callback, kept so it can be removed at uninit. */
struct rxtx_cbs {
	const struct rte_eth_rxtx_callback *cb;
};

/* Per-port, per-queue callback handles registered by rte_latencystats_init()
 * and removed by rte_latencystats_uninit(). */
static struct rxtx_cbs rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct rxtx_cbs tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
56
/** Maps a metric name to the offset of its value in rte_latency_stats. */
struct latency_stats_nameoff {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* Metric table: the name registered with the metrics library and the
 * struct rte_latency_stats field (by offset) each value is read from. */
static const struct latency_stats_nameoff lat_stats_strings[] = {
	{"min_latency_ns", offsetof(struct rte_latency_stats, min_latency)},
	{"avg_latency_ns", offsetof(struct rte_latency_stats, avg_latency)},
	{"max_latency_ns", offsetof(struct rte_latency_stats, max_latency)},
	{"jitter_ns", offsetof(struct rte_latency_stats, jitter)},
};

/** Number of entries in lat_stats_strings. */
#define NUM_LATENCY_STATS (sizeof(lat_stats_strings) / \
				sizeof(lat_stats_strings[0]))
71
72 int32_t
73 rte_latencystats_update(void)
74 {
75         unsigned int i;
76         float *stats_ptr = NULL;
77         uint64_t values[NUM_LATENCY_STATS] = {0};
78         int ret;
79
80         for (i = 0; i < NUM_LATENCY_STATS; i++) {
81                 stats_ptr = RTE_PTR_ADD(glob_stats,
82                                 lat_stats_strings[i].offset);
83                 values[i] = (uint64_t)floor((*stats_ptr)/
84                                 latencystat_cycles_per_ns());
85         }
86
87         ret = rte_metrics_update_values(RTE_METRICS_GLOBAL,
88                                         latency_stats_index,
89                                         values, NUM_LATENCY_STATS);
90         if (ret < 0)
91                 RTE_LOG(INFO, LATENCY_STATS, "Failed to push the stats\n");
92
93         return ret;
94 }
95
96 static void
97 rte_latencystats_fill_values(struct rte_metric_value *values)
98 {
99         unsigned int i;
100         float *stats_ptr = NULL;
101
102         for (i = 0; i < NUM_LATENCY_STATS; i++) {
103                 stats_ptr = RTE_PTR_ADD(glob_stats,
104                                 lat_stats_strings[i].offset);
105                 values[i].key = i;
106                 values[i].value = (uint64_t)floor((*stats_ptr)/
107                                                 latencystat_cycles_per_ns());
108         }
109 }
110
/**
 * Rx callback: time-stamp at most one received packet per sample
 * interval so the Tx callback (calc_latency) can measure its latency.
 *
 * @param pkts    Burst of received mbufs.
 * @param nb_pkts Number of mbufs in the burst.
 * @return nb_pkts, unchanged; only timestamp/ol_flags of sampled
 *         packets are modified.
 *
 * NOTE(review): prev_tsc and timer_tsc are file-scope and shared by
 * every Rx queue this callback is installed on, with no locking, so
 * concurrently polling lcores can race on them.  Presumably tolerable
 * since this only controls sampling — confirm it is intentional.
 */
static uint16_t
add_time_stamps(uint16_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused,
		void *user_cb __rte_unused)
{
	unsigned int i;
	uint64_t diff_tsc, now;

	/*
	 * For every sample interval,
	 * time stamp is marked on one received packet.
	 */
	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		/* Accumulate cycles elapsed since the previous packet. */
		diff_tsc = now - prev_tsc;
		timer_tsc += diff_tsc;

		/* Stamp only packets the PMD has not already stamped,
		 * and only once a full sample interval has elapsed. */
		if ((pkts[i]->ol_flags & PKT_RX_TIMESTAMP) == 0
				&& (timer_tsc >= samp_intvl)) {
			pkts[i]->timestamp = now;
			pkts[i]->ol_flags |= PKT_RX_TIMESTAMP;
			timer_tsc = 0; /* restart the interval */
		}
		prev_tsc = now;
		/* Re-read the TSC so per-packet deltas stay accurate. */
		now = rte_rdtsc();
	}

	return nb_pkts;
}
143
144 static uint16_t
145 calc_latency(uint16_t pid __rte_unused,
146                 uint16_t qid __rte_unused,
147                 struct rte_mbuf **pkts,
148                 uint16_t nb_pkts,
149                 void *_ __rte_unused)
150 {
151         unsigned int i, cnt = 0;
152         uint64_t now;
153         float latency[nb_pkts];
154         static float prev_latency;
155         /*
156          * Alpha represents degree of weighting decrease in EWMA,
157          * a constant smoothing factor between 0 and 1. The value
158          * is used below for measuring average latency.
159          */
160         const float alpha = 0.2;
161
162         now = rte_rdtsc();
163         for (i = 0; i < nb_pkts; i++) {
164                 if (pkts[i]->ol_flags & PKT_RX_TIMESTAMP)
165                         latency[cnt++] = now - pkts[i]->timestamp;
166         }
167
168         rte_spinlock_lock(&glob_stats->lock);
169         for (i = 0; i < cnt; i++) {
170                 /*
171                  * The jitter is calculated as statistical mean of interpacket
172                  * delay variation. The "jitter estimate" is computed by taking
173                  * the absolute values of the ipdv sequence and applying an
174                  * exponential filter with parameter 1/16 to generate the
175                  * estimate. i.e J=J+(|D(i-1,i)|-J)/16. Where J is jitter,
176                  * D(i-1,i) is difference in latency of two consecutive packets
177                  * i-1 and i.
178                  * Reference: Calculated as per RFC 5481, sec 4.1,
179                  * RFC 3393 sec 4.5, RFC 1889 sec.
180                  */
181                 glob_stats->jitter +=  (fabsf(prev_latency - latency[i])
182                                         - glob_stats->jitter)/16;
183                 if (glob_stats->min_latency == 0)
184                         glob_stats->min_latency = latency[i];
185                 else if (latency[i] < glob_stats->min_latency)
186                         glob_stats->min_latency = latency[i];
187                 else if (latency[i] > glob_stats->max_latency)
188                         glob_stats->max_latency = latency[i];
189                 /*
190                  * The average latency is measured using exponential moving
191                  * average, i.e. using EWMA
192                  * https://en.wikipedia.org/wiki/Moving_average
193                  */
194                 glob_stats->avg_latency +=
195                         alpha * (latency[i] - glob_stats->avg_latency);
196                 prev_latency = latency[i];
197         }
198         rte_spinlock_unlock(&glob_stats->lock);
199
200         return nb_pkts;
201 }
202
203 int
204 rte_latencystats_init(uint64_t app_samp_intvl,
205                 rte_latency_stats_flow_type_fn user_cb)
206 {
207         unsigned int i;
208         uint16_t pid;
209         uint16_t qid;
210         struct rxtx_cbs *cbs = NULL;
211         const char *ptr_strings[NUM_LATENCY_STATS] = {0};
212         const struct rte_memzone *mz = NULL;
213         const unsigned int flags = 0;
214         int ret;
215
216         if (rte_memzone_lookup(MZ_RTE_LATENCY_STATS))
217                 return -EEXIST;
218
219         /** Allocate stats in shared memory fo multi process support */
220         mz = rte_memzone_reserve(MZ_RTE_LATENCY_STATS, sizeof(*glob_stats),
221                                         rte_socket_id(), flags);
222         if (mz == NULL) {
223                 RTE_LOG(ERR, LATENCY_STATS, "Cannot reserve memory: %s:%d\n",
224                         __func__, __LINE__);
225                 return -ENOMEM;
226         }
227
228         glob_stats = mz->addr;
229         rte_spinlock_init(&glob_stats->lock);
230         samp_intvl = app_samp_intvl * latencystat_cycles_per_ns();
231
232         /** Register latency stats with stats library */
233         for (i = 0; i < NUM_LATENCY_STATS; i++)
234                 ptr_strings[i] = lat_stats_strings[i].name;
235
236         latency_stats_index = rte_metrics_reg_names(ptr_strings,
237                                                         NUM_LATENCY_STATS);
238         if (latency_stats_index < 0) {
239                 RTE_LOG(DEBUG, LATENCY_STATS,
240                         "Failed to register latency stats names\n");
241                 return -1;
242         }
243
244         /** Register Rx/Tx callbacks */
245         RTE_ETH_FOREACH_DEV(pid) {
246                 struct rte_eth_dev_info dev_info;
247
248                 ret = rte_eth_dev_info_get(pid, &dev_info);
249                 if (ret != 0) {
250                         RTE_LOG(INFO, LATENCY_STATS,
251                                 "Error during getting device (port %u) info: %s\n",
252                                 pid, strerror(-ret));
253
254                         continue;
255                 }
256
257                 for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
258                         cbs = &rx_cbs[pid][qid];
259                         cbs->cb = rte_eth_add_first_rx_callback(pid, qid,
260                                         add_time_stamps, user_cb);
261                         if (!cbs->cb)
262                                 RTE_LOG(INFO, LATENCY_STATS, "Failed to "
263                                         "register Rx callback for pid=%d, "
264                                         "qid=%d\n", pid, qid);
265                 }
266                 for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
267                         cbs = &tx_cbs[pid][qid];
268                         cbs->cb =  rte_eth_add_tx_callback(pid, qid,
269                                         calc_latency, user_cb);
270                         if (!cbs->cb)
271                                 RTE_LOG(INFO, LATENCY_STATS, "Failed to "
272                                         "register Tx callback for pid=%d, "
273                                         "qid=%d\n", pid, qid);
274                 }
275         }
276         return 0;
277 }
278
279 int
280 rte_latencystats_uninit(void)
281 {
282         uint16_t pid;
283         uint16_t qid;
284         int ret = 0;
285         struct rxtx_cbs *cbs = NULL;
286         const struct rte_memzone *mz = NULL;
287
288         /** De register Rx/Tx callbacks */
289         RTE_ETH_FOREACH_DEV(pid) {
290                 struct rte_eth_dev_info dev_info;
291
292                 ret = rte_eth_dev_info_get(pid, &dev_info);
293                 if (ret != 0) {
294                         RTE_LOG(INFO, LATENCY_STATS,
295                                 "Error during getting device (port %u) info: %s\n",
296                                 pid, strerror(-ret));
297
298                         continue;
299                 }
300
301                 for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
302                         cbs = &rx_cbs[pid][qid];
303                         ret = rte_eth_remove_rx_callback(pid, qid, cbs->cb);
304                         if (ret)
305                                 RTE_LOG(INFO, LATENCY_STATS, "failed to "
306                                         "remove Rx callback for pid=%d, "
307                                         "qid=%d\n", pid, qid);
308                 }
309                 for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
310                         cbs = &tx_cbs[pid][qid];
311                         ret = rte_eth_remove_tx_callback(pid, qid, cbs->cb);
312                         if (ret)
313                                 RTE_LOG(INFO, LATENCY_STATS, "failed to "
314                                         "remove Tx callback for pid=%d, "
315                                         "qid=%d\n", pid, qid);
316                 }
317         }
318
319         /* free up the memzone */
320         mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
321         if (mz)
322                 rte_memzone_free(mz);
323
324         return 0;
325 }
326
327 int
328 rte_latencystats_get_names(struct rte_metric_name *names, uint16_t size)
329 {
330         unsigned int i;
331
332         if (names == NULL || size < NUM_LATENCY_STATS)
333                 return NUM_LATENCY_STATS;
334
335         for (i = 0; i < NUM_LATENCY_STATS; i++)
336                 strlcpy(names[i].name, lat_stats_strings[i].name,
337                         sizeof(names[i].name));
338
339         return NUM_LATENCY_STATS;
340 }
341
342 int
343 rte_latencystats_get(struct rte_metric_value *values, uint16_t size)
344 {
345         if (size < NUM_LATENCY_STATS || values == NULL)
346                 return NUM_LATENCY_STATS;
347
348         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
349                 const struct rte_memzone *mz;
350                 mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
351                 if (mz == NULL) {
352                         RTE_LOG(ERR, LATENCY_STATS,
353                                 "Latency stats memzone not found\n");
354                         return -ENOMEM;
355                 }
356                 glob_stats =  mz->addr;
357         }
358
359         /* Retrieve latency stats */
360         rte_latencystats_fill_values(values);
361
362         return NUM_LATENCY_STATS;
363 }