/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation
 */

#include <rte_memcpy.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_pcapng.h>

#include "rte_pdump.h"

RTE_LOG_REGISTER_DEFAULT(pdump_logtype, NOTICE);

/* Macro for printing using RTE_LOG */
#define PDUMP_LOG(level, fmt, args...)                          \
        rte_log(RTE_LOG_ ## level, pdump_logtype, "%s(): " fmt, \
                __func__, ## args)

/* Used for the multi-process communication */
#define PDUMP_MP        "mp_pdump"

enum pdump_operation {
        DISABLE = 1,
        ENABLE = 2
};

/* Internal version number in request */
enum pdump_version {
        V1 = 1,             /* no filtering or snap */
        V2 = 2,
};

struct pdump_request {
        uint16_t ver;
        uint16_t op;
        uint32_t flags;
        char device[RTE_DEV_NAME_MAX_LEN];
        uint16_t queue;
        struct rte_ring *ring;
        struct rte_mempool *mp;

        const struct rte_bpf_prm *prm;
        uint32_t snaplen;
};

struct pdump_response {
        uint16_t ver;
        uint16_t res_op;
        int32_t err_value;
};

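/*
 * Per-port, per-queue state for the registered Rx/Tx callbacks:
 * the ring and mempool used for the copies, the optional BPF filter,
 * the request version and the snapshot length.
 */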
static struct pdump_rxtx_cbs {
        struct rte_ring *ring;
        struct rte_mempool *mp;
        const struct rte_eth_rxtx_callback *cb;
        const struct rte_bpf *filter;
        enum pdump_version ver;
        uint32_t snaplen;
} rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];


/*
 * The packet capture statistics keep track of packets
 * accepted, filtered and dropped. These are per-queue
 * and kept in shared memory between primary and secondary processes.
 */
static const char MZ_RTE_PDUMP_STATS[] = "rte_pdump_stats";
static struct {
        struct rte_pdump_stats rx[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
        struct rte_pdump_stats tx[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
        const struct rte_memzone *mz;
} *pdump_stats;

/* Clone the mbufs and place the copies into the capture ring. */
static void
pdump_copy(uint16_t port_id, uint16_t queue,
           enum rte_pcapng_direction direction,
           struct rte_mbuf **pkts, uint16_t nb_pkts,
           const struct pdump_rxtx_cbs *cbs,
           struct rte_pdump_stats *stats)
{
        unsigned int i;
        int ring_enq;
        uint16_t d_pkts = 0;
        struct rte_mbuf *dup_bufs[nb_pkts];
        uint64_t ts;
        struct rte_ring *ring;
        struct rte_mempool *mp;
        struct rte_mbuf *p;
        uint64_t rcs[nb_pkts];

        if (cbs->filter)
                rte_bpf_exec_burst(cbs->filter, (void **)pkts, rcs, nb_pkts);

        ts = rte_get_tsc_cycles();
        ring = cbs->ring;
        mp = cbs->mp;
        for (i = 0; i < nb_pkts; i++) {
                /*
                 * This uses the same BPF return value convention as the
                 * socket filter and pcap_offline_filter: if the program
                 * returns zero, the packet does not match the filter and
                 * is ignored.
                 */
                if (cbs->filter && rcs[i] == 0) {
                        __atomic_fetch_add(&stats->filtered,
                                           1, __ATOMIC_RELAXED);
                        continue;
                }

                /*
                 * If using pcapng, wrap the packet with capture metadata;
                 * otherwise make a simple copy.
                 */
                if (cbs->ver == V2)
                        p = rte_pcapng_copy(port_id, queue,
                                            pkts[i], mp, cbs->snaplen,
                                            ts, direction);
                else
                        p = rte_pktmbuf_copy(pkts[i], mp, 0, cbs->snaplen);

                if (unlikely(p == NULL))
                        __atomic_fetch_add(&stats->nombuf, 1, __ATOMIC_RELAXED);
                else
                        dup_bufs[d_pkts++] = p;
        }

        __atomic_fetch_add(&stats->accepted, d_pkts, __ATOMIC_RELAXED);

        ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
        if (unlikely(ring_enq < d_pkts)) {
                unsigned int drops = d_pkts - ring_enq;

                __atomic_fetch_add(&stats->ringfull, drops, __ATOMIC_RELAXED);
                rte_pktmbuf_free_bulk(&dup_bufs[ring_enq], drops);
        }
}

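/* Rx callback: copy matching packets to the capture ring, then pass the burst on unchanged. */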
static uint16_t
pdump_rx(uint16_t port, uint16_t queue,
        struct rte_mbuf **pkts, uint16_t nb_pkts,
        uint16_t max_pkts __rte_unused, void *user_params)
{
        const struct pdump_rxtx_cbs *cbs = user_params;
        struct rte_pdump_stats *stats = &pdump_stats->rx[port][queue];

        pdump_copy(port, queue, RTE_PCAPNG_DIRECTION_IN,
                   pkts, nb_pkts, cbs, stats);
        return nb_pkts;
}

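/* Tx callback: copy matching packets to the capture ring before they are transmitted. */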
static uint16_t
pdump_tx(uint16_t port, uint16_t queue,
                struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
{
        const struct pdump_rxtx_cbs *cbs = user_params;
        struct rte_pdump_stats *stats = &pdump_stats->tx[port][queue];

        pdump_copy(port, queue, RTE_PCAPNG_DIRECTION_OUT,
                   pkts, nb_pkts, cbs, stats);
        return nb_pkts;
}

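/*
 * Add or remove the Rx capture callback on the requested queue(s) of a
 * port: a single queue, or every queue in [0, end_q) when
 * RTE_PDUMP_ALL_QUEUES was requested.
 */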
static int
pdump_register_rx_callbacks(enum pdump_version ver,
                            uint16_t end_q, uint16_t port, uint16_t queue,
                            struct rte_ring *ring, struct rte_mempool *mp,
                            struct rte_bpf *filter,
                            uint16_t operation, uint32_t snaplen)
{
        uint16_t qid;

        qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
        for (; qid < end_q; qid++) {
                struct pdump_rxtx_cbs *cbs = &rx_cbs[port][qid];

                if (operation == ENABLE) {
                        if (cbs->cb) {
                                PDUMP_LOG(ERR,
                                        "rx callback for port=%d queue=%d already exists\n",
                                        port, qid);
                                return -EEXIST;
                        }
                        cbs->ver = ver;
                        cbs->ring = ring;
                        cbs->mp = mp;
                        cbs->snaplen = snaplen;
                        cbs->filter = filter;

                        cbs->cb = rte_eth_add_first_rx_callback(port, qid,
                                                                pdump_rx, cbs);
                        if (cbs->cb == NULL) {
                                PDUMP_LOG(ERR,
                                        "failed to add rx callback, errno=%d\n",
                                        rte_errno);
                                return rte_errno;
                        }
                } else if (operation == DISABLE) {
                        int ret;

                        if (cbs->cb == NULL) {
                                PDUMP_LOG(ERR,
                                        "no existing rx callback for port=%d queue=%d\n",
                                        port, qid);
                                return -EINVAL;
                        }
                        ret = rte_eth_remove_rx_callback(port, qid, cbs->cb);
                        if (ret < 0) {
                                PDUMP_LOG(ERR,
                                        "failed to remove rx callback, errno=%d\n",
                                        -ret);
                                return ret;
                        }
                        cbs->cb = NULL;
                }
        }

        return 0;
}

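/*
 * Add or remove the Tx capture callback on the requested queue(s) of a
 * port; mirrors pdump_register_rx_callbacks().
 */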
static int
pdump_register_tx_callbacks(enum pdump_version ver,
                            uint16_t end_q, uint16_t port, uint16_t queue,
                            struct rte_ring *ring, struct rte_mempool *mp,
                            struct rte_bpf *filter,
                            uint16_t operation, uint32_t snaplen)
{
        uint16_t qid;

        qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
        for (; qid < end_q; qid++) {
                struct pdump_rxtx_cbs *cbs = &tx_cbs[port][qid];

                if (operation == ENABLE) {
                        if (cbs->cb) {
                                PDUMP_LOG(ERR,
                                        "tx callback for port=%d queue=%d already exists\n",
                                        port, qid);
                                return -EEXIST;
                        }
                        cbs->ver = ver;
                        cbs->ring = ring;
                        cbs->mp = mp;
                        cbs->snaplen = snaplen;
                        cbs->filter = filter;

                        cbs->cb = rte_eth_add_tx_callback(port, qid, pdump_tx,
                                                                cbs);
                        if (cbs->cb == NULL) {
                                PDUMP_LOG(ERR,
                                        "failed to add tx callback, errno=%d\n",
                                        rte_errno);
                                return rte_errno;
                        }
                } else if (operation == DISABLE) {
                        int ret;

                        if (cbs->cb == NULL) {
                                PDUMP_LOG(ERR,
                                        "no existing tx callback for port=%d queue=%d\n",
                                        port, qid);
                                return -EINVAL;
                        }
                        ret = rte_eth_remove_tx_callback(port, qid, cbs->cb);
                        if (ret < 0) {
                                PDUMP_LOG(ERR,
                                        "failed to remove tx callback, errno=%d\n",
                                        -ret);
                                return ret;
                        }
                        cbs->cb = NULL;
                }
        }

        return 0;
}

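/*
 * Handle an enable/disable request: validate it, load the optional BPF
 * filter, and register or unregister the Rx/Tx callbacks on the target
 * port and queue(s).
 */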
static int
set_pdump_rxtx_cbs(const struct pdump_request *p)
{
        uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
        uint16_t port;
        int ret = 0;
        struct rte_bpf *filter = NULL;
        uint32_t flags;
        uint16_t operation;
        struct rte_ring *ring;
        struct rte_mempool *mp;

        /* Check for possible DPDK version mismatch */
        if (!(p->ver == V1 || p->ver == V2)) {
                PDUMP_LOG(ERR,
                          "incorrect client version %u\n", p->ver);
                return -EINVAL;
        }

        if (p->prm) {
                if (p->prm->prog_arg.type != RTE_BPF_ARG_PTR_MBUF) {
                        PDUMP_LOG(ERR,
                                  "invalid BPF program type: %u\n",
                                  p->prm->prog_arg.type);
                        return -EINVAL;
                }

                filter = rte_bpf_load(p->prm);
                if (filter == NULL) {
                        PDUMP_LOG(ERR, "cannot load BPF filter: %s\n",
                                  rte_strerror(rte_errno));
                        return -rte_errno;
                }
        }

        flags = p->flags;
        operation = p->op;
        queue = p->queue;
        ring = p->ring;
        mp = p->mp;

        ret = rte_eth_dev_get_port_by_name(p->device, &port);
        if (ret < 0) {
                PDUMP_LOG(ERR,
                          "failed to get port id for device id=%s\n",
                          p->device);
                return -EINVAL;
        }

        /* validate queue counts when capture is requested on all queues */
        if (queue == RTE_PDUMP_ALL_QUEUES) {
                struct rte_eth_dev_info dev_info;

                ret = rte_eth_dev_info_get(port, &dev_info);
                if (ret != 0) {
                        PDUMP_LOG(ERR,
                                "failed to get device info for port %u: %s\n",
                                port, strerror(-ret));
                        return ret;
                }

                nb_rx_q = dev_info.nb_rx_queues;
                nb_tx_q = dev_info.nb_tx_queues;
                if (nb_rx_q == 0 && flags & RTE_PDUMP_FLAG_RX) {
                        PDUMP_LOG(ERR,
                                "number of rx queues cannot be 0\n");
                        return -EINVAL;
                }
                if (nb_tx_q == 0 && flags & RTE_PDUMP_FLAG_TX) {
                        PDUMP_LOG(ERR,
                                "number of tx queues cannot be 0\n");
                        return -EINVAL;
                }
                if ((nb_tx_q == 0 || nb_rx_q == 0) &&
                        flags == RTE_PDUMP_FLAG_RXTX) {
                        PDUMP_LOG(ERR,
                                "both rx and tx queue counts must be non-zero\n");
                        return -EINVAL;
                }
        }

        /* register RX callback */
        if (flags & RTE_PDUMP_FLAG_RX) {
                end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
                ret = pdump_register_rx_callbacks(p->ver, end_q, port, queue,
                                                  ring, mp, filter,
                                                  operation, p->snaplen);
                if (ret < 0)
                        return ret;
        }

        /* register TX callback */
        if (flags & RTE_PDUMP_FLAG_TX) {
                end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
                ret = pdump_register_tx_callbacks(p->ver, end_q, port, queue,
                                                  ring, mp, filter,
                                                  operation, p->snaplen);
                if (ret < 0)
                        return ret;
        }

        return ret;
}

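/*
 * Multi-process server side: services pdump enable/disable requests
 * arriving over the PDUMP_MP channel (typically from a secondary
 * capture process) and replies with the result.
 */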
static int
pdump_server(const struct rte_mp_msg *mp_msg, const void *peer)
{
        struct rte_mp_msg mp_resp;
        const struct pdump_request *cli_req;
        struct pdump_response *resp = (struct pdump_response *)&mp_resp.param;

        /* recv client requests */
        if (mp_msg->len_param != sizeof(*cli_req)) {
                PDUMP_LOG(ERR, "failed to recv from client\n");
                resp->err_value = -EINVAL;
        } else {
                cli_req = (const struct pdump_request *)mp_msg->param;
                resp->ver = cli_req->ver;
                resp->res_op = cli_req->op;
                resp->err_value = set_pdump_rxtx_cbs(cli_req);
        }

        rte_strscpy(mp_resp.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
        mp_resp.len_param = sizeof(*resp);
        mp_resp.num_fds = 0;
        if (rte_mp_reply(&mp_resp, peer) < 0) {
                PDUMP_LOG(ERR, "failed to send to client: %s\n",
                          strerror(rte_errno));
                return -1;
        }

        return 0;
}

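/*
 * Initialize packet capture: reserve the shared statistics memzone and
 * register the handler for pdump requests.
 */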
int
rte_pdump_init(void)
{
        const struct rte_memzone *mz;
        int ret;

        mz = rte_memzone_reserve(MZ_RTE_PDUMP_STATS, sizeof(*pdump_stats),
                                 rte_socket_id(), 0);
        if (mz == NULL) {
                PDUMP_LOG(ERR, "cannot allocate pdump statistics\n");
                rte_errno = ENOMEM;
                return -1;
        }
        pdump_stats = mz->addr;
        pdump_stats->mz = mz;

        ret = rte_mp_action_register(PDUMP_MP, pdump_server);
        if (ret && rte_errno != ENOTSUP)
                return -1;
        return 0;
}

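/* Tear down packet capture: unregister the request handler and free the stats memzone. */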
int
rte_pdump_uninit(void)
{
        rte_mp_action_unregister(PDUMP_MP);

        if (pdump_stats != NULL) {
                rte_memzone_free(pdump_stats->mz);
                pdump_stats = NULL;
        }

        return 0;
}

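/*
 * Validate the ring and mempool supplied by the application: both must
 * exist and must be multi-producer/multi-consumer, since copies are
 * produced from data-path lcores and drained by the capture process.
 */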
static int
pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
{
        if (ring == NULL || mp == NULL) {
                PDUMP_LOG(ERR, "NULL ring or mempool\n");
                rte_errno = EINVAL;
                return -1;
        }
        if (mp->flags & RTE_MEMPOOL_F_SP_PUT ||
            mp->flags & RTE_MEMPOOL_F_SC_GET) {
                PDUMP_LOG(ERR,
                          "mempool with SP or SC set not valid for pdump, "
                          "must have MP and MC set\n");
                rte_errno = EINVAL;
                return -1;
        }
        if (rte_ring_is_prod_single(ring) || rte_ring_is_cons_single(ring)) {
                PDUMP_LOG(ERR,
                          "ring with SP or SC set is not valid for pdump, "
                          "must have MP and MC set\n");
                rte_errno = EINVAL;
                return -1;
        }

        return 0;
}

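/* Check that at least one direction is requested and no unknown flags are set. */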
static int
pdump_validate_flags(uint32_t flags)
{
        if ((flags & RTE_PDUMP_FLAG_RXTX) == 0) {
                PDUMP_LOG(ERR,
                        "invalid flags, should be either rx/tx/rxtx\n");
                rte_errno = EINVAL;
                return -1;
        }

        /* mask off the flags we know about */
        if (flags & ~(RTE_PDUMP_FLAG_RXTX | RTE_PDUMP_FLAG_PCAPNG)) {
                PDUMP_LOG(ERR,
                          "unknown flags: %#x\n", flags);
                rte_errno = ENOTSUP;
                return -1;
        }

        return 0;
}

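/* Check the port id range and resolve it to the underlying device name. */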
static int
pdump_validate_port(uint16_t port, char *name)
{
        int ret = 0;

        if (port >= RTE_MAX_ETHPORTS) {
                PDUMP_LOG(ERR, "Invalid port id %u\n", port);
                rte_errno = EINVAL;
                return -1;
        }

        ret = rte_eth_dev_get_name_by_port(port, name);
        if (ret < 0) {
                PDUMP_LOG(ERR, "port %u to name mapping failed\n",
                          port);
                rte_errno = EINVAL;
                return -1;
        }

        return 0;
}

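/*
 * Client side of the multi-process protocol: build a pdump_request,
 * send it to the process running pdump_server() and wait up to 5
 * seconds for the response carrying the result of the operation.
 */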
static int
pdump_prepare_client_request(const char *device, uint16_t queue,
                             uint32_t flags, uint32_t snaplen,
                             uint16_t operation,
                             struct rte_ring *ring,
                             struct rte_mempool *mp,
                             const struct rte_bpf_prm *prm)
{
        int ret = -1;
        struct rte_mp_msg mp_req, *mp_rep;
        struct rte_mp_reply mp_reply;
        struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
        struct pdump_request *req = (struct pdump_request *)mp_req.param;
        struct pdump_response *resp;

        memset(req, 0, sizeof(*req));

        req->ver = (flags & RTE_PDUMP_FLAG_PCAPNG) ? V2 : V1;
        req->flags = flags & RTE_PDUMP_FLAG_RXTX;
        req->op = operation;
        req->queue = queue;
        rte_strscpy(req->device, device, sizeof(req->device));

        if ((operation & ENABLE) != 0) {
                req->ring = ring;
                req->mp = mp;
                req->prm = prm;
                req->snaplen = snaplen;
        }

        rte_strscpy(mp_req.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
        mp_req.len_param = sizeof(*req);
        mp_req.num_fds = 0;
        if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0) {
                mp_rep = &mp_reply.msgs[0];
                resp = (struct pdump_response *)mp_rep->param;
                rte_errno = resp->err_value;
                if (!resp->err_value)
                        ret = 0;
                free(mp_reply.msgs);
        }

        if (ret < 0)
                PDUMP_LOG(ERR,
                        "client request for pdump enable/disable failed\n");
        return ret;
}

/*
 * There are two versions of this function because, although the original
 * API left a placeholder for a future filter argument, it never checked
 * the value. Therefore the API cannot depend on applications passing a
 * meaningful value there.
 */
static int
pdump_enable(uint16_t port, uint16_t queue,
             uint32_t flags, uint32_t snaplen,
             struct rte_ring *ring, struct rte_mempool *mp,
             const struct rte_bpf_prm *prm)
{
        int ret;
        char name[RTE_DEV_NAME_MAX_LEN];

        ret = pdump_validate_port(port, name);
        if (ret < 0)
                return ret;
        ret = pdump_validate_ring_mp(ring, mp);
        if (ret < 0)
                return ret;
        ret = pdump_validate_flags(flags);
        if (ret < 0)
                return ret;

        if (snaplen == 0)
                snaplen = UINT32_MAX;

        return pdump_prepare_client_request(name, queue, flags, snaplen,
                                            ENABLE, ring, mp, prm);
}

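/*
 * Illustrative sketch (not part of the library) of how a capture
 * application might call this API; the object names, sizes and the
 * 'portid'/'handle_error' identifiers are arbitrary. The default ring
 * and mempool flags used below give the required MP/MC behaviour.
 *
 *      struct rte_ring *ring = rte_ring_create("pdump_ring", 4096,
 *                                              rte_socket_id(), 0);
 *      struct rte_mempool *pool = rte_pktmbuf_pool_create("pdump_pool",
 *                      8191, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *                      rte_socket_id());
 *
 *      if (rte_pdump_enable(portid, RTE_PDUMP_ALL_QUEUES,
 *                           RTE_PDUMP_FLAG_RXTX, ring, pool, NULL) < 0)
 *              handle_error();
 *      ... dequeue captured mbufs from 'ring' and write them out ...
 */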
int
rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
                 struct rte_ring *ring,
                 struct rte_mempool *mp,
                 void *filter __rte_unused)
{
        return pdump_enable(port, queue, flags, 0,
                            ring, mp, NULL);
}

int
rte_pdump_enable_bpf(uint16_t port, uint16_t queue,
                     uint32_t flags, uint32_t snaplen,
                     struct rte_ring *ring,
                     struct rte_mempool *mp,
                     const struct rte_bpf_prm *prm)
{
        return pdump_enable(port, queue, flags, snaplen,
                            ring, mp, prm);
}

static int
pdump_enable_by_deviceid(const char *device_id, uint16_t queue,
                         uint32_t flags, uint32_t snaplen,
                         struct rte_ring *ring,
                         struct rte_mempool *mp,
                         const struct rte_bpf_prm *prm)
{
        int ret;

        ret = pdump_validate_ring_mp(ring, mp);
        if (ret < 0)
                return ret;
        ret = pdump_validate_flags(flags);
        if (ret < 0)
                return ret;

        if (snaplen == 0)
                snaplen = UINT32_MAX;

        return pdump_prepare_client_request(device_id, queue, flags, snaplen,
                                            ENABLE, ring, mp, prm);
}

int
rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue,
                             uint32_t flags,
                             struct rte_ring *ring,
                             struct rte_mempool *mp,
                             void *filter __rte_unused)
{
        return pdump_enable_by_deviceid(device_id, queue, flags, 0,
                                        ring, mp, NULL);
}

int
rte_pdump_enable_bpf_by_deviceid(const char *device_id, uint16_t queue,
                                 uint32_t flags, uint32_t snaplen,
                                 struct rte_ring *ring,
                                 struct rte_mempool *mp,
                                 const struct rte_bpf_prm *prm)
{
        return pdump_enable_by_deviceid(device_id, queue, flags, snaplen,
                                        ring, mp, prm);
}

int
rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags)
{
        int ret = 0;
        char name[RTE_DEV_NAME_MAX_LEN];

        ret = pdump_validate_port(port, name);
        if (ret < 0)
                return ret;
        ret = pdump_validate_flags(flags);
        if (ret < 0)
                return ret;

        ret = pdump_prepare_client_request(name, queue, flags, 0,
                                           DISABLE, NULL, NULL, NULL);

        return ret;
}

int
rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
                                uint32_t flags)
{
        int ret = 0;

        ret = pdump_validate_flags(flags);
        if (ret < 0)
                return ret;

        ret = pdump_prepare_client_request(device_id, queue, flags, 0,
                                           DISABLE, NULL, NULL, NULL);

        return ret;
}

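/*
 * Sum the per-queue statistics of one port into 'total'. The counters
 * are read with relaxed atomic loads because they are updated
 * concurrently from the data-path callbacks.
 */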
static void
pdump_sum_stats(uint16_t port, uint16_t nq,
                struct rte_pdump_stats stats[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
                struct rte_pdump_stats *total)
{
        uint64_t *sum = (uint64_t *)total;
        unsigned int i;
        uint64_t val;
        uint16_t qid;

        for (qid = 0; qid < nq; qid++) {
                const uint64_t *perq = (const uint64_t *)&stats[port][qid];

                for (i = 0; i < sizeof(*total) / sizeof(uint64_t); i++) {
                        val = __atomic_load_n(&perq[i], __ATOMIC_RELAXED);
                        sum[i] += val;
                }
        }
}

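/*
 * Report aggregate capture statistics for a port. A secondary process
 * that did not call rte_pdump_init() looks up the shared statistics
 * memzone by name before summing the per-queue counters.
 */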
int
rte_pdump_stats(uint16_t port, struct rte_pdump_stats *stats)
{
        struct rte_eth_dev_info dev_info;
        const struct rte_memzone *mz;
        int ret;

        memset(stats, 0, sizeof(*stats));
        ret = rte_eth_dev_info_get(port, &dev_info);
        if (ret != 0) {
                PDUMP_LOG(ERR,
                          "failed to get device info for port %u: %s\n",
                          port, strerror(-ret));
                return ret;
        }

        if (pdump_stats == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* rte_pdump_init was not called */
                        PDUMP_LOG(ERR, "pdump stats not initialized\n");
                        rte_errno = EINVAL;
                        return -1;
                }

                /* secondary process looks up the memzone */
                mz = rte_memzone_lookup(MZ_RTE_PDUMP_STATS);
                if (mz == NULL) {
                        /* rte_pdump_init was not called in the primary process */
                        PDUMP_LOG(ERR, "cannot find pdump stats\n");
                        rte_errno = EINVAL;
                        return -1;
                }
                pdump_stats = mz->addr;
        }

        pdump_sum_stats(port, dev_info.nb_rx_queues, pdump_stats->rx, stats);
        pdump_sum_stats(port, dev_info.nb_tx_queues, pdump_stats->tx, stats);
        return 0;
}