/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation
 */

#include <rte_memcpy.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_pcapng.h>

#include "rte_pdump.h"

RTE_LOG_REGISTER_DEFAULT(pdump_logtype, NOTICE);

/* Macro for printing using RTE_LOG */
#define PDUMP_LOG(level, fmt, args...)                          \
        rte_log(RTE_LOG_ ## level, pdump_logtype, "%s(): " fmt, \
                __func__, ## args)

/* Used for the multi-process communication */
#define PDUMP_MP        "mp_pdump"

enum pdump_operation {
        DISABLE = 1,
        ENABLE = 2
};

/* Internal version number in request */
enum pdump_version {
        V1 = 1,             /* no filtering or snap */
        V2 = 2,
};

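/*
 * Request sent from the capture (secondary) process to the primary process
 * over the rte_mp channel. The ring, mempool and BPF pointers can be passed
 * directly because DPDK multi-process peers map the same hugepage memory at
 * the same virtual addresses.
 */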
struct pdump_request {
        uint16_t ver;
        uint16_t op;
        uint32_t flags;
        char device[RTE_DEV_NAME_MAX_LEN];
        uint16_t queue;
        struct rte_ring *ring;
        struct rte_mempool *mp;

        const struct rte_bpf_prm *prm;
        uint32_t snaplen;
};

struct pdump_response {
        uint16_t ver;
        uint16_t res_op;
        int32_t err_value;
};

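/* Per-port, per-queue capture state installed while a capture is enabled. */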
static struct pdump_rxtx_cbs {
        struct rte_ring *ring;
        struct rte_mempool *mp;
        const struct rte_eth_rxtx_callback *cb;
        const struct rte_bpf *filter;
        enum pdump_version ver;
        uint32_t snaplen;
} rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];


/*
 * The packet capture statistics keep track of packets
 * accepted, filtered and dropped. These are per-queue and are
 * shared in memory between the primary and secondary processes.
 */
static const char MZ_RTE_PDUMP_STATS[] = "rte_pdump_stats";
static struct {
        struct rte_pdump_stats rx[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
        struct rte_pdump_stats tx[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
} *pdump_stats;

/* Create a clone of mbuf to be placed into ring. */
static void
pdump_copy(uint16_t port_id, uint16_t queue,
           enum rte_pcapng_direction direction,
           struct rte_mbuf **pkts, uint16_t nb_pkts,
           const struct pdump_rxtx_cbs *cbs,
           struct rte_pdump_stats *stats)
{
        unsigned int i;
        int ring_enq;
        uint16_t d_pkts = 0;
        struct rte_mbuf *dup_bufs[nb_pkts];
        uint64_t ts;
        struct rte_ring *ring;
        struct rte_mempool *mp;
        struct rte_mbuf *p;
        uint64_t rcs[nb_pkts];

        if (cbs->filter)
                rte_bpf_exec_burst(cbs->filter, (void **)pkts, rcs, nb_pkts);

        ts = rte_get_tsc_cycles();
        ring = cbs->ring;
        mp = cbs->mp;
        for (i = 0; i < nb_pkts; i++) {
                /*
                 * This uses the same BPF return value convention as socket
                 * filters and pcap_offline_filter: if the program returns
                 * zero, the packet does not match the filter and is ignored.
                 */
                if (cbs->filter && rcs[i] == 0) {
                        __atomic_fetch_add(&stats->filtered,
                                           1, __ATOMIC_RELAXED);
                        continue;
                }

                /*
                 * If using pcapng, wrap the packet with capture metadata;
                 * otherwise make a simple copy.
                 */
                if (cbs->ver == V2)
                        p = rte_pcapng_copy(port_id, queue,
                                            pkts[i], mp, cbs->snaplen,
                                            ts, direction);
                else
                        p = rte_pktmbuf_copy(pkts[i], mp, 0, cbs->snaplen);

                if (unlikely(p == NULL))
                        __atomic_fetch_add(&stats->nombuf, 1, __ATOMIC_RELAXED);
                else
                        dup_bufs[d_pkts++] = p;
        }

        __atomic_fetch_add(&stats->accepted, d_pkts, __ATOMIC_RELAXED);

        ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
        if (unlikely(ring_enq < d_pkts)) {
                unsigned int drops = d_pkts - ring_enq;

                __atomic_fetch_add(&stats->ringfull, drops, __ATOMIC_RELAXED);
                rte_pktmbuf_free_bulk(&dup_bufs[ring_enq], drops);
        }
}

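/* Rx callback: clone packets received on the queue into the capture ring. */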
static uint16_t
pdump_rx(uint16_t port, uint16_t queue,
        struct rte_mbuf **pkts, uint16_t nb_pkts,
        uint16_t max_pkts __rte_unused, void *user_params)
{
        const struct pdump_rxtx_cbs *cbs = user_params;
        struct rte_pdump_stats *stats = &pdump_stats->rx[port][queue];

        pdump_copy(port, queue, RTE_PCAPNG_DIRECTION_IN,
                   pkts, nb_pkts, cbs, stats);
        return nb_pkts;
}

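/* Tx callback: clone packets about to be transmitted into the capture ring. */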
static uint16_t
pdump_tx(uint16_t port, uint16_t queue,
                struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
{
        const struct pdump_rxtx_cbs *cbs = user_params;
        struct rte_pdump_stats *stats = &pdump_stats->tx[port][queue];

        pdump_copy(port, queue, RTE_PCAPNG_DIRECTION_OUT,
                   pkts, nb_pkts, cbs, stats);
        return nb_pkts;
}

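/*
 * Add or remove the Rx capture callback on each selected queue of the port.
 * With RTE_PDUMP_ALL_QUEUES the loop covers queues [0, end_q).
 */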
static int
pdump_register_rx_callbacks(enum pdump_version ver,
                            uint16_t end_q, uint16_t port, uint16_t queue,
                            struct rte_ring *ring, struct rte_mempool *mp,
                            struct rte_bpf *filter,
                            uint16_t operation, uint32_t snaplen)
{
        uint16_t qid;

        qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
        for (; qid < end_q; qid++) {
                struct pdump_rxtx_cbs *cbs = &rx_cbs[port][qid];

                if (operation == ENABLE) {
                        if (cbs->cb) {
                                PDUMP_LOG(ERR,
                                        "rx callback for port=%d queue=%d, already exists\n",
                                        port, qid);
                                return -EEXIST;
                        }
                        cbs->ver = ver;
                        cbs->ring = ring;
                        cbs->mp = mp;
                        cbs->snaplen = snaplen;
                        cbs->filter = filter;

                        cbs->cb = rte_eth_add_first_rx_callback(port, qid,
                                                                pdump_rx, cbs);
                        if (cbs->cb == NULL) {
                                PDUMP_LOG(ERR,
                                        "failed to add rx callback, errno=%d\n",
                                        rte_errno);
                                return rte_errno;
                        }
                } else if (operation == DISABLE) {
                        int ret;

                        if (cbs->cb == NULL) {
                                PDUMP_LOG(ERR,
                                        "no existing rx callback for port=%d queue=%d\n",
                                        port, qid);
                                return -EINVAL;
                        }
                        ret = rte_eth_remove_rx_callback(port, qid, cbs->cb);
                        if (ret < 0) {
                                PDUMP_LOG(ERR,
                                        "failed to remove rx callback, errno=%d\n",
                                        -ret);
                                return ret;
                        }
                        cbs->cb = NULL;
                }
        }

        return 0;
}

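/* Same as above, but install or remove the Tx capture callback. */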
static int
pdump_register_tx_callbacks(enum pdump_version ver,
                            uint16_t end_q, uint16_t port, uint16_t queue,
                            struct rte_ring *ring, struct rte_mempool *mp,
                            struct rte_bpf *filter,
                            uint16_t operation, uint32_t snaplen)
{

        uint16_t qid;

        qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
        for (; qid < end_q; qid++) {
                struct pdump_rxtx_cbs *cbs = &tx_cbs[port][qid];

                if (operation == ENABLE) {
                        if (cbs->cb) {
                                PDUMP_LOG(ERR,
                                        "tx callback for port=%d queue=%d, already exists\n",
                                        port, qid);
                                return -EEXIST;
                        }
                        cbs->ver = ver;
                        cbs->ring = ring;
                        cbs->mp = mp;
                        cbs->snaplen = snaplen;
                        cbs->filter = filter;

                        cbs->cb = rte_eth_add_tx_callback(port, qid, pdump_tx,
                                                                cbs);
                        if (cbs->cb == NULL) {
                                PDUMP_LOG(ERR,
                                        "failed to add tx callback, errno=%d\n",
                                        rte_errno);
                                return rte_errno;
                        }
                } else if (operation == DISABLE) {
                        int ret;

                        if (cbs->cb == NULL) {
                                PDUMP_LOG(ERR,
                                        "no existing tx callback for port=%d queue=%d\n",
                                        port, qid);
                                return -EINVAL;
                        }
                        ret = rte_eth_remove_tx_callback(port, qid, cbs->cb);
                        if (ret < 0) {
                                PDUMP_LOG(ERR,
                                        "failed to remove tx callback, errno=%d\n",
                                        -ret);
                                return ret;
                        }
                        cbs->cb = NULL;
                }
        }

        return 0;
}

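/*
 * Runs in the primary process: validate the client request, load the
 * optional BPF filter and register or unregister the Rx/Tx callbacks.
 */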
static int
set_pdump_rxtx_cbs(const struct pdump_request *p)
{
        uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
        uint16_t port;
        int ret = 0;
        struct rte_bpf *filter = NULL;
        uint32_t flags;
        uint16_t operation;
        struct rte_ring *ring;
        struct rte_mempool *mp;

        /* Check for possible DPDK version mismatch */
        if (!(p->ver == V1 || p->ver == V2)) {
                PDUMP_LOG(ERR,
                          "incorrect client version %u\n", p->ver);
                return -EINVAL;
        }

        if (p->prm) {
                if (p->prm->prog_arg.type != RTE_BPF_ARG_PTR_MBUF) {
                        PDUMP_LOG(ERR,
                                  "invalid BPF program type: %u\n",
                                  p->prm->prog_arg.type);
                        return -EINVAL;
                }

                filter = rte_bpf_load(p->prm);
                if (filter == NULL) {
                        PDUMP_LOG(ERR, "cannot load BPF filter: %s\n",
                                  rte_strerror(rte_errno));
                        return -rte_errno;
                }
        }

        flags = p->flags;
        operation = p->op;
        queue = p->queue;
        ring = p->ring;
        mp = p->mp;

        ret = rte_eth_dev_get_port_by_name(p->device, &port);
        if (ret < 0) {
                PDUMP_LOG(ERR,
                          "failed to get port id for device id=%s\n",
                          p->device);
                return -EINVAL;
        }

        /* validate the queue counts when packet capture is for all queues */
        if (queue == RTE_PDUMP_ALL_QUEUES) {
                struct rte_eth_dev_info dev_info;

                ret = rte_eth_dev_info_get(port, &dev_info);
                if (ret != 0) {
                        PDUMP_LOG(ERR,
                                "Error during getting device (port %u) info: %s\n",
                                port, strerror(-ret));
                        return ret;
                }

                nb_rx_q = dev_info.nb_rx_queues;
                nb_tx_q = dev_info.nb_tx_queues;
                if (nb_rx_q == 0 && flags & RTE_PDUMP_FLAG_RX) {
                        PDUMP_LOG(ERR,
                                "number of rx queues cannot be 0\n");
                        return -EINVAL;
                }
                if (nb_tx_q == 0 && flags & RTE_PDUMP_FLAG_TX) {
                        PDUMP_LOG(ERR,
                                "number of tx queues cannot be 0\n");
                        return -EINVAL;
                }
                if ((nb_tx_q == 0 || nb_rx_q == 0) &&
                        flags == RTE_PDUMP_FLAG_RXTX) {
                        PDUMP_LOG(ERR,
                                "both tx&rx queues must be non zero\n");
                        return -EINVAL;
                }
        }

        /* register RX callback */
        if (flags & RTE_PDUMP_FLAG_RX) {
                end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
                ret = pdump_register_rx_callbacks(p->ver, end_q, port, queue,
                                                  ring, mp, filter,
                                                  operation, p->snaplen);
                if (ret < 0)
                        return ret;
        }

        /* register TX callback */
        if (flags & RTE_PDUMP_FLAG_TX) {
                end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
                ret = pdump_register_tx_callbacks(p->ver, end_q, port, queue,
                                                  ring, mp, filter,
                                                  operation, p->snaplen);
                if (ret < 0)
                        return ret;
        }

        return ret;
}

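/*
 * rte_mp handler registered by rte_pdump_init(): services capture requests
 * from secondary processes and replies with the result of set_pdump_rxtx_cbs().
 */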
static int
pdump_server(const struct rte_mp_msg *mp_msg, const void *peer)
{
        struct rte_mp_msg mp_resp;
        const struct pdump_request *cli_req;
        struct pdump_response *resp = (struct pdump_response *)&mp_resp.param;

        /* recv client requests */
        if (mp_msg->len_param != sizeof(*cli_req)) {
                PDUMP_LOG(ERR, "failed to recv from client\n");
                resp->err_value = -EINVAL;
        } else {
                cli_req = (const struct pdump_request *)mp_msg->param;
                resp->ver = cli_req->ver;
                resp->res_op = cli_req->op;
                resp->err_value = set_pdump_rxtx_cbs(cli_req);
        }

        rte_strscpy(mp_resp.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
        mp_resp.len_param = sizeof(*resp);
        mp_resp.num_fds = 0;
        if (rte_mp_reply(&mp_resp, peer) < 0) {
                PDUMP_LOG(ERR, "failed to send to client:%s\n",
                          strerror(rte_errno));
                return -1;
        }

        return 0;
}

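/*
 * Called by the primary (captured) process: reserve the shared statistics
 * memzone and register the multi-process action that handles client requests.
 */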
int
rte_pdump_init(void)
{
        const struct rte_memzone *mz;
        int ret;

        mz = rte_memzone_reserve(MZ_RTE_PDUMP_STATS, sizeof(*pdump_stats),
                                 rte_socket_id(), 0);
        if (mz == NULL) {
                PDUMP_LOG(ERR, "cannot allocate pdump statistics\n");
                rte_errno = ENOMEM;
                return -1;
        }
        pdump_stats = mz->addr;

        ret = rte_mp_action_register(PDUMP_MP, pdump_server);
        if (ret && rte_errno != ENOTSUP)
                return -1;
        return 0;
}

int
rte_pdump_uninit(void)
{
        rte_mp_action_unregister(PDUMP_MP);

        return 0;
}

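/*
 * The capture ring and mempool are used concurrently by data-path threads on
 * multiple queues and by the capture process, so both must be
 * multi-producer/multi-consumer.
 */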
static int
pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
{
        if (ring == NULL || mp == NULL) {
                PDUMP_LOG(ERR, "NULL ring or mempool\n");
                rte_errno = EINVAL;
                return -1;
        }
        if (mp->flags & RTE_MEMPOOL_F_SP_PUT ||
            mp->flags & RTE_MEMPOOL_F_SC_GET) {
                PDUMP_LOG(ERR,
                          "mempool with SP or SC set not valid for pdump, "
                          "must have MP and MC set\n");
                rte_errno = EINVAL;
                return -1;
        }
        if (rte_ring_is_prod_single(ring) || rte_ring_is_cons_single(ring)) {
                PDUMP_LOG(ERR,
                          "ring with SP or SC set is not valid for pdump, "
                          "must have MP and MC set\n");
                rte_errno = EINVAL;
                return -1;
        }

        return 0;
}

static int
pdump_validate_flags(uint32_t flags)
{
        if ((flags & RTE_PDUMP_FLAG_RXTX) == 0) {
                PDUMP_LOG(ERR,
                        "invalid flags, should be either rx/tx/rxtx\n");
                rte_errno = EINVAL;
                return -1;
        }

        /* mask off the flags we know about */
        if (flags & ~(RTE_PDUMP_FLAG_RXTX | RTE_PDUMP_FLAG_PCAPNG)) {
                PDUMP_LOG(ERR,
                          "unknown flags: %#x\n", flags);
                rte_errno = ENOTSUP;
                return -1;
        }

        return 0;
}

static int
pdump_validate_port(uint16_t port, char *name)
{
        int ret = 0;

        if (port >= RTE_MAX_ETHPORTS) {
                PDUMP_LOG(ERR, "Invalid port id %u\n", port);
                rte_errno = EINVAL;
                return -1;
        }

        ret = rte_eth_dev_get_name_by_port(port, name);
        if (ret < 0) {
                PDUMP_LOG(ERR, "port %u to name mapping failed\n",
                          port);
                rte_errno = EINVAL;
                return -1;
        }

        return 0;
}

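/*
 * Send the request to the primary process over the rte_mp channel and wait
 * up to 5 seconds for its response; on failure rte_errno carries the error
 * reported by the primary.
 */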
static int
pdump_prepare_client_request(const char *device, uint16_t queue,
                             uint32_t flags, uint32_t snaplen,
                             uint16_t operation,
                             struct rte_ring *ring,
                             struct rte_mempool *mp,
                             const struct rte_bpf_prm *prm)
{
        int ret = -1;
        struct rte_mp_msg mp_req, *mp_rep;
        struct rte_mp_reply mp_reply;
        struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
        struct pdump_request *req = (struct pdump_request *)mp_req.param;
        struct pdump_response *resp;

        memset(req, 0, sizeof(*req));

        req->ver = (flags & RTE_PDUMP_FLAG_PCAPNG) ? V2 : V1;
        req->flags = flags & RTE_PDUMP_FLAG_RXTX;
        req->op = operation;
        req->queue = queue;
        rte_strscpy(req->device, device, sizeof(req->device));

        if ((operation & ENABLE) != 0) {
                req->ring = ring;
                req->mp = mp;
                req->prm = prm;
                req->snaplen = snaplen;
        }

        rte_strscpy(mp_req.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
        mp_req.len_param = sizeof(*req);
        mp_req.num_fds = 0;
        if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0) {
                mp_rep = &mp_reply.msgs[0];
                resp = (struct pdump_response *)mp_rep->param;
                rte_errno = resp->err_value;
                if (!resp->err_value)
                        ret = 0;
                free(mp_reply.msgs);
        }

        if (ret < 0)
                PDUMP_LOG(ERR,
                        "client request for pdump enable/disable failed\n");
        return ret;
}

/*
 * There are two versions of this function because, although the original API
 * left a placeholder for a future filter argument, it never checked the value.
 * Therefore the API cannot depend on applications passing a non-bogus value.
 */
static int
pdump_enable(uint16_t port, uint16_t queue,
             uint32_t flags, uint32_t snaplen,
             struct rte_ring *ring, struct rte_mempool *mp,
             const struct rte_bpf_prm *prm)
{
        int ret;
        char name[RTE_DEV_NAME_MAX_LEN];

        ret = pdump_validate_port(port, name);
        if (ret < 0)
                return ret;
        ret = pdump_validate_ring_mp(ring, mp);
        if (ret < 0)
                return ret;
        ret = pdump_validate_flags(flags);
        if (ret < 0)
                return ret;

        if (snaplen == 0)
                snaplen = UINT32_MAX;

        return pdump_prepare_client_request(name, queue, flags, snaplen,
                                            ENABLE, ring, mp, prm);
}

int
rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
                 struct rte_ring *ring,
                 struct rte_mempool *mp,
                 void *filter __rte_unused)
{
        return pdump_enable(port, queue, flags, 0,
                            ring, mp, NULL);
}

int
rte_pdump_enable_bpf(uint16_t port, uint16_t queue,
                     uint32_t flags, uint32_t snaplen,
                     struct rte_ring *ring,
                     struct rte_mempool *mp,
                     const struct rte_bpf_prm *prm)
{
        return pdump_enable(port, queue, flags, snaplen,
                            ring, mp, prm);
}

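/*
 * Illustrative sketch (not part of the library) of how a secondary capture
 * process might enable capture in pcapng format. The ring/pool names, sizes,
 * the snaplen and the port_id variable below are example assumptions only:
 *
 *	struct rte_ring *ring = rte_ring_create("pdump_ring", 1024,
 *						rte_socket_id(), 0);
 *	struct rte_mempool *pool = rte_pktmbuf_pool_create("pdump_pool",
 *						4095, 256, 0,
 *						RTE_MBUF_DEFAULT_BUF_SIZE,
 *						rte_socket_id());
 *	if (ring == NULL || pool == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create capture ring/pool\n");
 *
 *	if (rte_pdump_enable_bpf(port_id, RTE_PDUMP_ALL_QUEUES,
 *				 RTE_PDUMP_FLAG_RXTX | RTE_PDUMP_FLAG_PCAPNG,
 *				 RTE_MBUF_DEFAULT_BUF_SIZE,
 *				 ring, pool, NULL) < 0)
 *		printf("enable failed: %s\n", rte_strerror(rte_errno));
 */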
static int
pdump_enable_by_deviceid(const char *device_id, uint16_t queue,
                         uint32_t flags, uint32_t snaplen,
                         struct rte_ring *ring,
                         struct rte_mempool *mp,
                         const struct rte_bpf_prm *prm)
{
        int ret;

        ret = pdump_validate_ring_mp(ring, mp);
        if (ret < 0)
                return ret;
        ret = pdump_validate_flags(flags);
        if (ret < 0)
                return ret;

        return pdump_prepare_client_request(device_id, queue, flags, snaplen,
                                            ENABLE, ring, mp, prm);
}

int
rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue,
                             uint32_t flags,
                             struct rte_ring *ring,
                             struct rte_mempool *mp,
                             void *filter __rte_unused)
{
        return pdump_enable_by_deviceid(device_id, queue, flags, 0,
                                        ring, mp, NULL);
}

int
rte_pdump_enable_bpf_by_deviceid(const char *device_id, uint16_t queue,
                                 uint32_t flags, uint32_t snaplen,
                                 struct rte_ring *ring,
                                 struct rte_mempool *mp,
                                 const struct rte_bpf_prm *prm)
{
        return pdump_enable_by_deviceid(device_id, queue, flags, snaplen,
                                        ring, mp, prm);
}

int
rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags)
{
        int ret = 0;
        char name[RTE_DEV_NAME_MAX_LEN];

        ret = pdump_validate_port(port, name);
        if (ret < 0)
                return ret;
        ret = pdump_validate_flags(flags);
        if (ret < 0)
                return ret;

        ret = pdump_prepare_client_request(name, queue, flags, 0,
                                           DISABLE, NULL, NULL, NULL);

        return ret;
}

int
rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
                                uint32_t flags)
{
        int ret = 0;

        ret = pdump_validate_flags(flags);
        if (ret < 0)
                return ret;

        ret = pdump_prepare_client_request(device_id, queue, flags, 0,
                                           DISABLE, NULL, NULL, NULL);

        return ret;
}

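/*
 * Accumulate the per-queue counters for one direction into *total. Each
 * per-queue struct is read field by field as an array of uint64_t using
 * relaxed atomic loads.
 */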
static void
pdump_sum_stats(uint16_t port, uint16_t nq,
                struct rte_pdump_stats stats[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
                struct rte_pdump_stats *total)
{
        uint64_t *sum = (uint64_t *)total;
        unsigned int i;
        uint64_t val;
        uint16_t qid;

        for (qid = 0; qid < nq; qid++) {
                const uint64_t *perq = (const uint64_t *)&stats[port][qid];

                for (i = 0; i < sizeof(*total) / sizeof(uint64_t); i++) {
                        val = __atomic_load_n(&perq[i], __ATOMIC_RELAXED);
                        sum[i] += val;
                }
        }
}

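/*
 * Sum the Rx and Tx capture statistics for a port. In a secondary process the
 * shared statistics memzone is looked up on first use; the primary must have
 * called rte_pdump_init() to create it.
 */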
int
rte_pdump_stats(uint16_t port, struct rte_pdump_stats *stats)
{
        struct rte_eth_dev_info dev_info;
        const struct rte_memzone *mz;
        int ret;

        memset(stats, 0, sizeof(*stats));
        ret = rte_eth_dev_info_get(port, &dev_info);
        if (ret != 0) {
                PDUMP_LOG(ERR,
                          "Error during getting device (port %u) info: %s\n",
                          port, strerror(-ret));
                return ret;
        }

        if (pdump_stats == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* rte_pdump_init was not called */
                        PDUMP_LOG(ERR, "pdump stats not initialized\n");
                        rte_errno = EINVAL;
                        return -1;
                }

                /* secondary process looks up the memzone */
                mz = rte_memzone_lookup(MZ_RTE_PDUMP_STATS);
                if (mz == NULL) {
                        /* rte_pdump_init was not called in primary process?? */
                        PDUMP_LOG(ERR, "can not find pdump stats\n");
                        rte_errno = EINVAL;
                        return -1;
                }
                pdump_stats = mz->addr;
        }

        pdump_sum_stats(port, dev_info.nb_rx_queues, pdump_stats->rx, stats);
        pdump_sum_stats(port, dev_info.nb_tx_queues, pdump_stats->tx, stats);
        return 0;
}