acl: fix rules with 8-byte field size
[dpdk.git] / lib / pdump / rte_pdump.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2018 Intel Corporation
3  */
4
5 #include <rte_mbuf.h>
6 #include <rte_ethdev.h>
7 #include <rte_lcore.h>
8 #include <rte_log.h>
9 #include <rte_memzone.h>
10 #include <rte_errno.h>
11 #include <rte_string_fns.h>
12 #include <rte_pcapng.h>
13
14 #include "rte_pdump.h"
15
/* Dedicated log type for the pdump library, default level NOTICE. */
RTE_LOG_REGISTER_DEFAULT(pdump_logtype, NOTICE);

/* Macro for printing using RTE_LOG; prefixes every message with the
 * calling function's name.
 */
#define PDUMP_LOG(level, fmt, args...)                          \
	rte_log(RTE_LOG_ ## level, pdump_logtype, "%s(): " fmt, \
		__func__, ## args)

/* Action name used for the multi-process communication channel
 * between the capturing secondary process and the primary.
 */
#define PDUMP_MP        "mp_pdump"
25
/* Operation requested by the pdump client (secondary process). */
enum pdump_operation {
	DISABLE = 1,	/* remove previously installed capture callbacks */
	ENABLE = 2	/* install capture callbacks on the requested queues */
};

/* Internal version number in request */
enum pdump_version {
	V1 = 1,             /* no filtering or snap */
	V2 = 2,		    /* supports BPF filter, snaplen and pcapng output */
};
36
/* Request sent from the capturing process to the primary over the
 * rte_mp channel. NOTE(review): ring/mp/prm are raw pointers shared
 * across processes — valid only because both sides map the same
 * hugepage memory.
 */
struct pdump_request {
	uint16_t ver;		/* enum pdump_version of the client */
	uint16_t op;		/* enum pdump_operation */
	uint32_t flags;		/* RTE_PDUMP_FLAG_RX/TX direction bits */
	char device[RTE_DEV_NAME_MAX_LEN];	/* target device name */
	uint16_t queue;		/* queue id or RTE_PDUMP_ALL_QUEUES */
	struct rte_ring *ring;	/* ring to enqueue captured mbufs on */
	struct rte_mempool *mp;	/* pool used to copy/wrap captured packets */

	const struct rte_bpf_prm *prm;	/* optional BPF filter program (V2) */
	uint32_t snaplen;	/* max bytes captured per packet (V2) */
};

/* Reply returned by the primary for each pdump_request. */
struct pdump_response {
	uint16_t ver;		/* echoed request version */
	uint16_t res_op;	/* echoed request operation */
	int32_t err_value;	/* 0 on success, negative errno on failure */
};
55
/* Per-port, per-queue capture state; one table each for Rx and Tx.
 * cb is non-NULL iff capture is currently enabled on that queue.
 */
static struct pdump_rxtx_cbs {
	struct rte_ring *ring;		/* destination ring for copies */
	struct rte_mempool *mp;		/* pool for the packet copies */
	const struct rte_eth_rxtx_callback *cb;	/* installed ethdev callback */
	const struct rte_bpf *filter;	/* optional BPF filter, may be NULL */
	enum pdump_version ver;		/* V1 = plain copy, V2 = pcapng wrap */
	uint32_t snaplen;		/* capture length limit per packet */
} rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
65
66
/*
 * The packet capture statistics keep track of packets
 * accepted, filtered and dropped. These are per-queue
 * and in memory between primary and secondary processes.
 */
static const char MZ_RTE_PDUMP_STATS[] = "rte_pdump_stats";
/* Lives in a memzone so secondary processes can look it up by name;
 * counters are updated with relaxed atomics from the datapath.
 */
static struct {
	struct rte_pdump_stats rx[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
	struct rte_pdump_stats tx[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
	const struct rte_memzone *mz;	/* backing memzone, freed at uninit */
} *pdump_stats;
78
79 /* Create a clone of mbuf to be placed into ring. */
80 static void
81 pdump_copy(uint16_t port_id, uint16_t queue,
82            enum rte_pcapng_direction direction,
83            struct rte_mbuf **pkts, uint16_t nb_pkts,
84            const struct pdump_rxtx_cbs *cbs,
85            struct rte_pdump_stats *stats)
86 {
87         unsigned int i;
88         int ring_enq;
89         uint16_t d_pkts = 0;
90         struct rte_mbuf *dup_bufs[nb_pkts];
91         uint64_t ts;
92         struct rte_ring *ring;
93         struct rte_mempool *mp;
94         struct rte_mbuf *p;
95         uint64_t rcs[nb_pkts];
96
97         if (cbs->filter)
98                 rte_bpf_exec_burst(cbs->filter, (void **)pkts, rcs, nb_pkts);
99
100         ts = rte_get_tsc_cycles();
101         ring = cbs->ring;
102         mp = cbs->mp;
103         for (i = 0; i < nb_pkts; i++) {
104                 /*
105                  * This uses same BPF return value convention as socket filter
106                  * and pcap_offline_filter.
107                  * if program returns zero
108                  * then packet doesn't match the filter (will be ignored).
109                  */
110                 if (cbs->filter && rcs[i] == 0) {
111                         __atomic_fetch_add(&stats->filtered,
112                                            1, __ATOMIC_RELAXED);
113                         continue;
114                 }
115
116                 /*
117                  * If using pcapng then want to wrap packets
118                  * otherwise a simple copy.
119                  */
120                 if (cbs->ver == V2)
121                         p = rte_pcapng_copy(port_id, queue,
122                                             pkts[i], mp, cbs->snaplen,
123                                             ts, direction);
124                 else
125                         p = rte_pktmbuf_copy(pkts[i], mp, 0, cbs->snaplen);
126
127                 if (unlikely(p == NULL))
128                         __atomic_fetch_add(&stats->nombuf, 1, __ATOMIC_RELAXED);
129                 else
130                         dup_bufs[d_pkts++] = p;
131         }
132
133         __atomic_fetch_add(&stats->accepted, d_pkts, __ATOMIC_RELAXED);
134
135         ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
136         if (unlikely(ring_enq < d_pkts)) {
137                 unsigned int drops = d_pkts - ring_enq;
138
139                 __atomic_fetch_add(&stats->ringfull, drops, __ATOMIC_RELAXED);
140                 rte_pktmbuf_free_bulk(&dup_bufs[ring_enq], drops);
141         }
142 }
143
144 static uint16_t
145 pdump_rx(uint16_t port, uint16_t queue,
146         struct rte_mbuf **pkts, uint16_t nb_pkts,
147         uint16_t max_pkts __rte_unused, void *user_params)
148 {
149         const struct pdump_rxtx_cbs *cbs = user_params;
150         struct rte_pdump_stats *stats = &pdump_stats->rx[port][queue];
151
152         pdump_copy(port, queue, RTE_PCAPNG_DIRECTION_IN,
153                    pkts, nb_pkts, cbs, stats);
154         return nb_pkts;
155 }
156
157 static uint16_t
158 pdump_tx(uint16_t port, uint16_t queue,
159                 struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
160 {
161         const struct pdump_rxtx_cbs *cbs = user_params;
162         struct rte_pdump_stats *stats = &pdump_stats->tx[port][queue];
163
164         pdump_copy(port, queue, RTE_PCAPNG_DIRECTION_OUT,
165                    pkts, nb_pkts, cbs, stats);
166         return nb_pkts;
167 }
168
169 static int
170 pdump_register_rx_callbacks(enum pdump_version ver,
171                             uint16_t end_q, uint16_t port, uint16_t queue,
172                             struct rte_ring *ring, struct rte_mempool *mp,
173                             struct rte_bpf *filter,
174                             uint16_t operation, uint32_t snaplen)
175 {
176         uint16_t qid;
177
178         qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
179         for (; qid < end_q; qid++) {
180                 struct pdump_rxtx_cbs *cbs = &rx_cbs[port][qid];
181
182                 if (operation == ENABLE) {
183                         if (cbs->cb) {
184                                 PDUMP_LOG(ERR,
185                                         "rx callback for port=%d queue=%d, already exists\n",
186                                         port, qid);
187                                 return -EEXIST;
188                         }
189                         cbs->ver = ver;
190                         cbs->ring = ring;
191                         cbs->mp = mp;
192                         cbs->snaplen = snaplen;
193                         cbs->filter = filter;
194
195                         cbs->cb = rte_eth_add_first_rx_callback(port, qid,
196                                                                 pdump_rx, cbs);
197                         if (cbs->cb == NULL) {
198                                 PDUMP_LOG(ERR,
199                                         "failed to add rx callback, errno=%d\n",
200                                         rte_errno);
201                                 return rte_errno;
202                         }
203                 } else if (operation == DISABLE) {
204                         int ret;
205
206                         if (cbs->cb == NULL) {
207                                 PDUMP_LOG(ERR,
208                                         "no existing rx callback for port=%d queue=%d\n",
209                                         port, qid);
210                                 return -EINVAL;
211                         }
212                         ret = rte_eth_remove_rx_callback(port, qid, cbs->cb);
213                         if (ret < 0) {
214                                 PDUMP_LOG(ERR,
215                                         "failed to remove rx callback, errno=%d\n",
216                                         -ret);
217                                 return ret;
218                         }
219                         cbs->cb = NULL;
220                 }
221         }
222
223         return 0;
224 }
225
226 static int
227 pdump_register_tx_callbacks(enum pdump_version ver,
228                             uint16_t end_q, uint16_t port, uint16_t queue,
229                             struct rte_ring *ring, struct rte_mempool *mp,
230                             struct rte_bpf *filter,
231                             uint16_t operation, uint32_t snaplen)
232 {
233
234         uint16_t qid;
235
236         qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
237         for (; qid < end_q; qid++) {
238                 struct pdump_rxtx_cbs *cbs = &tx_cbs[port][qid];
239
240                 if (operation == ENABLE) {
241                         if (cbs->cb) {
242                                 PDUMP_LOG(ERR,
243                                         "tx callback for port=%d queue=%d, already exists\n",
244                                         port, qid);
245                                 return -EEXIST;
246                         }
247                         cbs->ver = ver;
248                         cbs->ring = ring;
249                         cbs->mp = mp;
250                         cbs->snaplen = snaplen;
251                         cbs->filter = filter;
252
253                         cbs->cb = rte_eth_add_tx_callback(port, qid, pdump_tx,
254                                                                 cbs);
255                         if (cbs->cb == NULL) {
256                                 PDUMP_LOG(ERR,
257                                         "failed to add tx callback, errno=%d\n",
258                                         rte_errno);
259                                 return rte_errno;
260                         }
261                 } else if (operation == DISABLE) {
262                         int ret;
263
264                         if (cbs->cb == NULL) {
265                                 PDUMP_LOG(ERR,
266                                         "no existing tx callback for port=%d queue=%d\n",
267                                         port, qid);
268                                 return -EINVAL;
269                         }
270                         ret = rte_eth_remove_tx_callback(port, qid, cbs->cb);
271                         if (ret < 0) {
272                                 PDUMP_LOG(ERR,
273                                         "failed to remove tx callback, errno=%d\n",
274                                         -ret);
275                                 return ret;
276                         }
277                         cbs->cb = NULL;
278                 }
279         }
280
281         return 0;
282 }
283
284 static int
285 set_pdump_rxtx_cbs(const struct pdump_request *p)
286 {
287         uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
288         uint16_t port;
289         int ret = 0;
290         struct rte_bpf *filter = NULL;
291         uint32_t flags;
292         uint16_t operation;
293         struct rte_ring *ring;
294         struct rte_mempool *mp;
295
296         /* Check for possible DPDK version mismatch */
297         if (!(p->ver == V1 || p->ver == V2)) {
298                 PDUMP_LOG(ERR,
299                           "incorrect client version %u\n", p->ver);
300                 return -EINVAL;
301         }
302
303         if (p->prm) {
304                 if (p->prm->prog_arg.type != RTE_BPF_ARG_PTR_MBUF) {
305                         PDUMP_LOG(ERR,
306                                   "invalid BPF program type: %u\n",
307                                   p->prm->prog_arg.type);
308                         return -EINVAL;
309                 }
310
311                 filter = rte_bpf_load(p->prm);
312                 if (filter == NULL) {
313                         PDUMP_LOG(ERR, "cannot load BPF filter: %s\n",
314                                   rte_strerror(rte_errno));
315                         return -rte_errno;
316                 }
317         }
318
319         flags = p->flags;
320         operation = p->op;
321         queue = p->queue;
322         ring = p->ring;
323         mp = p->mp;
324
325         ret = rte_eth_dev_get_port_by_name(p->device, &port);
326         if (ret < 0) {
327                 PDUMP_LOG(ERR,
328                           "failed to get port id for device id=%s\n",
329                           p->device);
330                 return -EINVAL;
331         }
332
333         /* validation if packet capture is for all queues */
334         if (queue == RTE_PDUMP_ALL_QUEUES) {
335                 struct rte_eth_dev_info dev_info;
336
337                 ret = rte_eth_dev_info_get(port, &dev_info);
338                 if (ret != 0) {
339                         PDUMP_LOG(ERR,
340                                 "Error during getting device (port %u) info: %s\n",
341                                 port, strerror(-ret));
342                         return ret;
343                 }
344
345                 nb_rx_q = dev_info.nb_rx_queues;
346                 nb_tx_q = dev_info.nb_tx_queues;
347                 if (nb_rx_q == 0 && flags & RTE_PDUMP_FLAG_RX) {
348                         PDUMP_LOG(ERR,
349                                 "number of rx queues cannot be 0\n");
350                         return -EINVAL;
351                 }
352                 if (nb_tx_q == 0 && flags & RTE_PDUMP_FLAG_TX) {
353                         PDUMP_LOG(ERR,
354                                 "number of tx queues cannot be 0\n");
355                         return -EINVAL;
356                 }
357                 if ((nb_tx_q == 0 || nb_rx_q == 0) &&
358                         flags == RTE_PDUMP_FLAG_RXTX) {
359                         PDUMP_LOG(ERR,
360                                 "both tx&rx queues must be non zero\n");
361                         return -EINVAL;
362                 }
363         }
364
365         /* register RX callback */
366         if (flags & RTE_PDUMP_FLAG_RX) {
367                 end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
368                 ret = pdump_register_rx_callbacks(p->ver, end_q, port, queue,
369                                                   ring, mp, filter,
370                                                   operation, p->snaplen);
371                 if (ret < 0)
372                         return ret;
373         }
374
375         /* register TX callback */
376         if (flags & RTE_PDUMP_FLAG_TX) {
377                 end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
378                 ret = pdump_register_tx_callbacks(p->ver, end_q, port, queue,
379                                                   ring, mp, filter,
380                                                   operation, p->snaplen);
381                 if (ret < 0)
382                         return ret;
383         }
384
385         return ret;
386 }
387
388 static int
389 pdump_server(const struct rte_mp_msg *mp_msg, const void *peer)
390 {
391         struct rte_mp_msg mp_resp;
392         const struct pdump_request *cli_req;
393         struct pdump_response *resp = (struct pdump_response *)&mp_resp.param;
394
395         /* recv client requests */
396         if (mp_msg->len_param != sizeof(*cli_req)) {
397                 PDUMP_LOG(ERR, "failed to recv from client\n");
398                 resp->err_value = -EINVAL;
399         } else {
400                 cli_req = (const struct pdump_request *)mp_msg->param;
401                 resp->ver = cli_req->ver;
402                 resp->res_op = cli_req->op;
403                 resp->err_value = set_pdump_rxtx_cbs(cli_req);
404         }
405
406         rte_strscpy(mp_resp.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
407         mp_resp.len_param = sizeof(*resp);
408         mp_resp.num_fds = 0;
409         if (rte_mp_reply(&mp_resp, peer) < 0) {
410                 PDUMP_LOG(ERR, "failed to send to client:%s\n",
411                           strerror(rte_errno));
412                 return -1;
413         }
414
415         return 0;
416 }
417
418 int
419 rte_pdump_init(void)
420 {
421         const struct rte_memzone *mz;
422         int ret;
423
424         mz = rte_memzone_reserve(MZ_RTE_PDUMP_STATS, sizeof(*pdump_stats),
425                                  rte_socket_id(), 0);
426         if (mz == NULL) {
427                 PDUMP_LOG(ERR, "cannot allocate pdump statistics\n");
428                 rte_errno = ENOMEM;
429                 return -1;
430         }
431         pdump_stats = mz->addr;
432         pdump_stats->mz = mz;
433
434         ret = rte_mp_action_register(PDUMP_MP, pdump_server);
435         if (ret && rte_errno != ENOTSUP)
436                 return -1;
437         return 0;
438 }
439
440 int
441 rte_pdump_uninit(void)
442 {
443         rte_mp_action_unregister(PDUMP_MP);
444
445         if (pdump_stats != NULL) {
446                 rte_memzone_free(pdump_stats->mz);
447                 pdump_stats = NULL;
448         }
449
450         return 0;
451 }
452
453 static int
454 pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
455 {
456         if (ring == NULL || mp == NULL) {
457                 PDUMP_LOG(ERR, "NULL ring or mempool\n");
458                 rte_errno = EINVAL;
459                 return -1;
460         }
461         if (mp->flags & RTE_MEMPOOL_F_SP_PUT ||
462             mp->flags & RTE_MEMPOOL_F_SC_GET) {
463                 PDUMP_LOG(ERR,
464                           "mempool with SP or SC set not valid for pdump,"
465                           "must have MP and MC set\n");
466                 rte_errno = EINVAL;
467                 return -1;
468         }
469         if (rte_ring_is_prod_single(ring) || rte_ring_is_cons_single(ring)) {
470                 PDUMP_LOG(ERR,
471                           "ring with SP or SC set is not valid for pdump,"
472                           "must have MP and MC set\n");
473                 rte_errno = EINVAL;
474                 return -1;
475         }
476
477         return 0;
478 }
479
480 static int
481 pdump_validate_flags(uint32_t flags)
482 {
483         if ((flags & RTE_PDUMP_FLAG_RXTX) == 0) {
484                 PDUMP_LOG(ERR,
485                         "invalid flags, should be either rx/tx/rxtx\n");
486                 rte_errno = EINVAL;
487                 return -1;
488         }
489
490         /* mask off the flags we know about */
491         if (flags & ~(RTE_PDUMP_FLAG_RXTX | RTE_PDUMP_FLAG_PCAPNG)) {
492                 PDUMP_LOG(ERR,
493                           "unknown flags: %#x\n", flags);
494                 rte_errno = ENOTSUP;
495                 return -1;
496         }
497
498         return 0;
499 }
500
501 static int
502 pdump_validate_port(uint16_t port, char *name)
503 {
504         int ret = 0;
505
506         if (port >= RTE_MAX_ETHPORTS) {
507                 PDUMP_LOG(ERR, "Invalid port id %u\n", port);
508                 rte_errno = EINVAL;
509                 return -1;
510         }
511
512         ret = rte_eth_dev_get_name_by_port(port, name);
513         if (ret < 0) {
514                 PDUMP_LOG(ERR, "port %u to name mapping failed\n",
515                           port);
516                 rte_errno = EINVAL;
517                 return -1;
518         }
519
520         return 0;
521 }
522
523 static int
524 pdump_prepare_client_request(const char *device, uint16_t queue,
525                              uint32_t flags, uint32_t snaplen,
526                              uint16_t operation,
527                              struct rte_ring *ring,
528                              struct rte_mempool *mp,
529                              const struct rte_bpf_prm *prm)
530 {
531         int ret = -1;
532         struct rte_mp_msg mp_req, *mp_rep;
533         struct rte_mp_reply mp_reply;
534         struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
535         struct pdump_request *req = (struct pdump_request *)mp_req.param;
536         struct pdump_response *resp;
537
538         memset(req, 0, sizeof(*req));
539
540         req->ver = (flags & RTE_PDUMP_FLAG_PCAPNG) ? V2 : V1;
541         req->flags = flags & RTE_PDUMP_FLAG_RXTX;
542         req->op = operation;
543         req->queue = queue;
544         rte_strscpy(req->device, device, sizeof(req->device));
545
546         if ((operation & ENABLE) != 0) {
547                 req->ring = ring;
548                 req->mp = mp;
549                 req->prm = prm;
550                 req->snaplen = snaplen;
551         }
552
553         rte_strscpy(mp_req.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
554         mp_req.len_param = sizeof(*req);
555         mp_req.num_fds = 0;
556         if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0) {
557                 mp_rep = &mp_reply.msgs[0];
558                 resp = (struct pdump_response *)mp_rep->param;
559                 rte_errno = resp->err_value;
560                 if (!resp->err_value)
561                         ret = 0;
562                 free(mp_reply.msgs);
563         }
564
565         if (ret < 0)
566                 PDUMP_LOG(ERR,
567                         "client request for pdump enable/disable failed\n");
568         return ret;
569 }
570
571 /*
572  * There are two versions of this function, because although original API
573  * left place holder for future filter, it never checked the value.
574  * Therefore the API can't depend on application passing a non
575  * bogus value.
576  */
577 static int
578 pdump_enable(uint16_t port, uint16_t queue,
579              uint32_t flags, uint32_t snaplen,
580              struct rte_ring *ring, struct rte_mempool *mp,
581              const struct rte_bpf_prm *prm)
582 {
583         int ret;
584         char name[RTE_DEV_NAME_MAX_LEN];
585
586         ret = pdump_validate_port(port, name);
587         if (ret < 0)
588                 return ret;
589         ret = pdump_validate_ring_mp(ring, mp);
590         if (ret < 0)
591                 return ret;
592         ret = pdump_validate_flags(flags);
593         if (ret < 0)
594                 return ret;
595
596         if (snaplen == 0)
597                 snaplen = UINT32_MAX;
598
599         return pdump_prepare_client_request(name, queue, flags, snaplen,
600                                             ENABLE, ring, mp, prm);
601 }
602
603 int
604 rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
605                  struct rte_ring *ring,
606                  struct rte_mempool *mp,
607                  void *filter __rte_unused)
608 {
609         return pdump_enable(port, queue, flags, 0,
610                             ring, mp, NULL);
611 }
612
/* Enable capture on a port/queue with optional BPF filter and snap
 * length; thin wrapper over pdump_enable().
 */
int
rte_pdump_enable_bpf(uint16_t port, uint16_t queue,
		     uint32_t flags, uint32_t snaplen,
		     struct rte_ring *ring,
		     struct rte_mempool *mp,
		     const struct rte_bpf_prm *prm)
{
	return pdump_enable(port, queue, flags, snaplen, ring, mp, prm);
}
623
624 static int
625 pdump_enable_by_deviceid(const char *device_id, uint16_t queue,
626                          uint32_t flags, uint32_t snaplen,
627                          struct rte_ring *ring,
628                          struct rte_mempool *mp,
629                          const struct rte_bpf_prm *prm)
630 {
631         int ret;
632
633         ret = pdump_validate_ring_mp(ring, mp);
634         if (ret < 0)
635                 return ret;
636         ret = pdump_validate_flags(flags);
637         if (ret < 0)
638                 return ret;
639
640         if (snaplen == 0)
641                 snaplen = UINT32_MAX;
642
643         return pdump_prepare_client_request(device_id, queue, flags, snaplen,
644                                             ENABLE, ring, mp, prm);
645 }
646
647 int
648 rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue,
649                              uint32_t flags,
650                              struct rte_ring *ring,
651                              struct rte_mempool *mp,
652                              void *filter __rte_unused)
653 {
654         return pdump_enable_by_deviceid(device_id, queue, flags, 0,
655                                         ring, mp, NULL);
656 }
657
/* Enable capture by device name with optional BPF filter and snap
 * length; thin wrapper over pdump_enable_by_deviceid().
 */
int
rte_pdump_enable_bpf_by_deviceid(const char *device_id, uint16_t queue,
				 uint32_t flags, uint32_t snaplen,
				 struct rte_ring *ring,
				 struct rte_mempool *mp,
				 const struct rte_bpf_prm *prm)
{
	return pdump_enable_by_deviceid(device_id, queue, flags, snaplen,
					ring, mp, prm);
}
668
669 int
670 rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags)
671 {
672         int ret = 0;
673         char name[RTE_DEV_NAME_MAX_LEN];
674
675         ret = pdump_validate_port(port, name);
676         if (ret < 0)
677                 return ret;
678         ret = pdump_validate_flags(flags);
679         if (ret < 0)
680                 return ret;
681
682         ret = pdump_prepare_client_request(name, queue, flags, 0,
683                                            DISABLE, NULL, NULL, NULL);
684
685         return ret;
686 }
687
688 int
689 rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
690                                 uint32_t flags)
691 {
692         int ret = 0;
693
694         ret = pdump_validate_flags(flags);
695         if (ret < 0)
696                 return ret;
697
698         ret = pdump_prepare_client_request(device_id, queue, flags, 0,
699                                            DISABLE, NULL, NULL, NULL);
700
701         return ret;
702 }
703
704 static void
705 pdump_sum_stats(uint16_t port, uint16_t nq,
706                 struct rte_pdump_stats stats[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
707                 struct rte_pdump_stats *total)
708 {
709         uint64_t *sum = (uint64_t *)total;
710         unsigned int i;
711         uint64_t val;
712         uint16_t qid;
713
714         for (qid = 0; qid < nq; qid++) {
715                 const uint64_t *perq = (const uint64_t *)&stats[port][qid];
716
717                 for (i = 0; i < sizeof(*total) / sizeof(uint64_t); i++) {
718                         val = __atomic_load_n(&perq[i], __ATOMIC_RELAXED);
719                         sum[i] += val;
720                 }
721         }
722 }
723
724 int
725 rte_pdump_stats(uint16_t port, struct rte_pdump_stats *stats)
726 {
727         struct rte_eth_dev_info dev_info;
728         const struct rte_memzone *mz;
729         int ret;
730
731         memset(stats, 0, sizeof(*stats));
732         ret = rte_eth_dev_info_get(port, &dev_info);
733         if (ret != 0) {
734                 PDUMP_LOG(ERR,
735                           "Error during getting device (port %u) info: %s\n",
736                           port, strerror(-ret));
737                 return ret;
738         }
739
740         if (pdump_stats == NULL) {
741                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
742                         /* rte_pdump_init was not called */
743                         PDUMP_LOG(ERR, "pdump stats not initialized\n");
744                         rte_errno = EINVAL;
745                         return -1;
746                 }
747
748                 /* secondary process looks up the memzone */
749                 mz = rte_memzone_lookup(MZ_RTE_PDUMP_STATS);
750                 if (mz == NULL) {
751                         /* rte_pdump_init was not called in primary process?? */
752                         PDUMP_LOG(ERR, "can not find pdump stats\n");
753                         rte_errno = EINVAL;
754                         return -1;
755                 }
756                 pdump_stats = mz->addr;
757         }
758
759         pdump_sum_stats(port, dev_info.nb_rx_queues, pdump_stats->rx, stats);
760         pdump_sum_stats(port, dev_info.nb_tx_queues, pdump_stats->tx, stats);
761         return 0;
762 }