1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2018 Intel Corporation
5 #include <rte_memcpy.h>
7 #include <rte_ethdev.h>
10 #include <rte_errno.h>
11 #include <rte_string_fns.h>
13 #include "rte_pdump.h"
/* Dedicated log type for the pdump library, defaulting to NOTICE level. */
15 RTE_LOG_REGISTER(pdump_logtype, lib.pdump, NOTICE);
17 /* Macro for printing using RTE_LOG */
/* NOTE(review): the macro's trailing continuation line(s) are elided in this
 * view; the "%s(): " prefix presumably receives __func__ -- confirm against
 * the full source. */
18 #define PDUMP_LOG(level, fmt, args...) \
19 rte_log(RTE_LOG_ ## level, pdump_logtype, "%s(): " fmt, \
22 /* Used for the multi-process communication */
23 #define PDUMP_MP "mp_pdump"
/* Operation requested by a pdump client; ENABLE and DISABLE are referenced
 * throughout this file.  Enumerator list elided in this view. */
25 enum pdump_operation {
/*
 * Request message a pdump client (secondary process) sends to the server
 * over the rte_mp channel.  Per set_pdump_rxtx_cbs() it carries en_v1 and
 * dis_v1 variants, each naming the target device plus the ring/mempool that
 * receive the duplicated packets (union/wrapper members elided in this view).
 */
34 struct pdump_request {
/* enable variant (accessed as data.en_v1): capture destination */
40 char device[RTE_DEV_NAME_MAX_LEN];
42 struct rte_ring *ring;
43 struct rte_mempool *mp;
/* disable variant (accessed as data.dis_v1): same layout; ring/mp are sent
 * as NULL by pdump_prepare_client_request() on the DISABLE path */
47 char device[RTE_DEV_NAME_MAX_LEN];
49 struct rte_ring *ring;
50 struct rte_mempool *mp;
/* Reply sent back to the client: echoes ver/op and carries err_value,
 * filled in by pdump_server().  Members elided in this view. */
56 struct pdump_response {
/*
 * Per-port/per-queue capture state, one table for RX and one for TX.
 * ring/mp are the destinations for duplicated mbufs; cb is the ethdev
 * callback handle, kept so the callback can be removed on DISABLE.
 */
62 static struct pdump_rxtx_cbs {
63 struct rte_ring *ring;
64 struct rte_mempool *mp;
65 const struct rte_eth_rxtx_callback *cb;
67 } rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
68 tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
/*
 * Duplicate a burst of mbufs into the capture mempool and enqueue the
 * copies onto the capture ring; copies that do not fit on the ring are
 * logged and freed so they never leak.
 * NOTE(review): user_params presumably points at the pdump_rxtx_cbs entry
 * that supplies ring/mp -- the cbs/ring/mp extraction lines are elided in
 * this view, confirm against the full source.
 */
72 pdump_copy(struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
/* VLA sized by the incoming burst; holds the successful duplicates */
77 struct rte_mbuf *dup_bufs[nb_pkts];
78 struct pdump_rxtx_cbs *cbs;
79 struct rte_ring *ring;
80 struct rte_mempool *mp;
/* deep-copy each packet in full (offset 0, unlimited length) */
86 for (i = 0; i < nb_pkts; i++) {
87 p = rte_pktmbuf_copy(pkts[i], mp, 0, UINT32_MAX);
89 dup_bufs[d_pkts++] = p;
/* hand the duplicates to the capture consumer in one burst */
92 ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
93 if (unlikely(ring_enq < d_pkts)) {
94 unsigned int drops = d_pkts - ring_enq;
97 "only %d of packets enqueued to ring\n", ring_enq);
/* free only the copies the ring refused */
98 rte_pktmbuf_free_bulk(&dup_bufs[ring_enq], drops);
/* Ethdev RX callback: snapshot the received burst into the capture
 * ring/mempool without altering it.  Return statement elided in this
 * view (presumably returns nb_pkts unmodified -- confirm). */
103 pdump_rx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
104 struct rte_mbuf **pkts, uint16_t nb_pkts,
105 uint16_t max_pkts __rte_unused,
108 pdump_copy(pkts, nb_pkts, user_params);
/* Ethdev TX callback: snapshot the outgoing burst into the capture
 * ring/mempool without altering it.  Return statement elided in this
 * view (presumably returns nb_pkts unmodified -- confirm). */
113 pdump_tx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
114 struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
116 pdump_copy(pkts, nb_pkts, user_params);
/*
 * Add or remove the pdump RX callback on [queue, end_q) of a port.
 * queue == RTE_PDUMP_ALL_QUEUES starts the sweep at qid 0; otherwise a
 * single queue is touched (caller passes end_q = queue + 1).
 * ENABLE installs pdump_rx as the first RX callback so captures see
 * packets before other callbacks; DISABLE removes and clears the handle.
 * NOTE(review): cbs is the address of an array element and can never be
 * NULL, so the "cbs &&" guards below are redundant -- candidate cleanup
 * once the full body is in view.
 */
121 pdump_register_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
122 struct rte_ring *ring, struct rte_mempool *mp,
126 struct pdump_rxtx_cbs *cbs = NULL;
128 qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
129 for (; qid < end_q; qid++) {
130 cbs = &rx_cbs[port][qid];
131 if (cbs && operation == ENABLE) {
/* double-enable on the same queue is rejected */
134 "rx callback for port=%d queue=%d, already exists\n",
140 cbs->cb = rte_eth_add_first_rx_callback(port, qid,
142 if (cbs->cb == NULL) {
144 "failed to add rx callback, errno=%d\n",
149 if (cbs && operation == DISABLE) {
/* disabling a queue with no active capture is an error */
152 if (cbs->cb == NULL) {
154 "no existing rx callback for port=%d queue=%d\n",
158 ret = rte_eth_remove_rx_callback(port, qid, cbs->cb);
161 "failed to remove rx callback, errno=%d\n",
/*
 * TX-side twin of pdump_register_rx_callbacks(): add or remove the pdump
 * TX callback on [queue, end_q) of a port.  queue == RTE_PDUMP_ALL_QUEUES
 * starts the sweep at qid 0; otherwise one queue is touched.
 * NOTE(review): cbs is the address of an array element and can never be
 * NULL, so the "cbs &&" guards below are redundant -- candidate cleanup
 * once the full body is in view.
 */
173 pdump_register_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
174 struct rte_ring *ring, struct rte_mempool *mp,
179 struct pdump_rxtx_cbs *cbs = NULL;
181 qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
182 for (; qid < end_q; qid++) {
183 cbs = &tx_cbs[port][qid];
184 if (cbs && operation == ENABLE) {
/* double-enable on the same queue is rejected */
187 "tx callback for port=%d queue=%d, already exists\n",
193 cbs->cb = rte_eth_add_tx_callback(port, qid, pdump_tx,
195 if (cbs->cb == NULL) {
197 "failed to add tx callback, errno=%d\n",
202 if (cbs && operation == DISABLE) {
/* disabling a queue with no active capture is an error */
205 if (cbs->cb == NULL) {
207 "no existing tx callback for port=%d queue=%d\n",
211 ret = rte_eth_remove_tx_callback(port, qid, cbs->cb);
214 "failed to remove tx callback, errno=%d\n",
/*
 * Server-side worker: apply an enable/disable request from a client.
 * Resolves the port from the device name carried in the request, pulls
 * queue/ring/mp from the matching request variant, validates queue counts
 * when capturing on all queues, then installs/removes the RX and/or TX
 * callbacks according to the request flags.  Returns an error value that
 * pdump_server() forwards to the client (error paths elided in this view).
 */
226 set_pdump_rxtx_cbs(const struct pdump_request *p)
228 uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
233 struct rte_ring *ring;
234 struct rte_mempool *mp;
/* pick parameters from the enable or disable variant of the request */
238 if (operation == ENABLE) {
239 ret = rte_eth_dev_get_port_by_name(p->data.en_v1.device,
243 "failed to get port id for device id=%s\n",
244 p->data.en_v1.device);
247 queue = p->data.en_v1.queue;
248 ring = p->data.en_v1.ring;
249 mp = p->data.en_v1.mp;
/* DISABLE path: same lookup against the dis_v1 variant */
251 ret = rte_eth_dev_get_port_by_name(p->data.dis_v1.device,
255 "failed to get port id for device id=%s\n",
256 p->data.dis_v1.device);
259 queue = p->data.dis_v1.queue;
260 ring = p->data.dis_v1.ring;
261 mp = p->data.dis_v1.mp;
264 /* validation if packet capture is for all queues */
265 if (queue == RTE_PDUMP_ALL_QUEUES) {
266 struct rte_eth_dev_info dev_info;
268 ret = rte_eth_dev_info_get(port, &dev_info);
271 "Error during getting device (port %u) info: %s\n",
272 port, strerror(-ret));
/* an all-queues capture needs at least one queue in each requested
 * direction */
276 nb_rx_q = dev_info.nb_rx_queues;
277 nb_tx_q = dev_info.nb_tx_queues;
278 if (nb_rx_q == 0 && flags & RTE_PDUMP_FLAG_RX) {
280 "number of rx queues cannot be 0\n");
283 if (nb_tx_q == 0 && flags & RTE_PDUMP_FLAG_TX) {
285 "number of tx queues cannot be 0\n");
288 if ((nb_tx_q == 0 || nb_rx_q == 0) &&
289 flags == RTE_PDUMP_FLAG_RXTX) {
291 "both tx&rx queues must be non zero\n");
296 /* register RX callback */
297 if (flags & RTE_PDUMP_FLAG_RX) {
/* single-queue capture spans exactly [queue, queue + 1) */
298 end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
299 ret = pdump_register_rx_callbacks(end_q, port, queue, ring, mp,
305 /* register TX callback */
306 if (flags & RTE_PDUMP_FLAG_TX) {
307 end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
308 ret = pdump_register_tx_callbacks(end_q, port, queue, ring, mp,
/*
 * Multi-process action handler registered under PDUMP_MP: receives a
 * pdump_request from a client, applies it via set_pdump_rxtx_cbs(), and
 * replies with a pdump_response whose err_value carries the outcome.
 * A malformed message (wrong payload size) is answered with -EINVAL.
 */
318 pdump_server(const struct rte_mp_msg *mp_msg, const void *peer)
320 struct rte_mp_msg mp_resp;
321 const struct pdump_request *cli_req;
/* build the response in place inside the reply message payload */
322 struct pdump_response *resp = (struct pdump_response *)&mp_resp.param;
324 /* recv client requests */
325 if (mp_msg->len_param != sizeof(*cli_req)) {
326 PDUMP_LOG(ERR, "failed to recv from client\n");
327 resp->err_value = -EINVAL;
329 cli_req = (const struct pdump_request *)mp_msg->param;
/* echo version/op back so the client can match the reply */
330 resp->ver = cli_req->ver;
331 resp->res_op = cli_req->op;
332 resp->err_value = set_pdump_rxtx_cbs(cli_req);
335 strlcpy(mp_resp.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
336 mp_resp.len_param = sizeof(*resp);
338 if (rte_mp_reply(&mp_resp, peer) < 0) {
339 PDUMP_LOG(ERR, "failed to send to client:%s\n",
340 strerror(rte_errno));
/* rte_pdump_init() fragment (definition line elided in this view):
 * register the server-side handler for client requests.  rte_errno ==
 * ENOTSUP (IPC unavailable) is exempted from the failure check below;
 * the taken branch is elided. */
352 ret = rte_mp_action_register(PDUMP_MP, pdump_server);
353 if (ret && rte_errno != ENOTSUP)
/* Tear down the pdump framework: unregister the multi-process action
 * handler installed at init time. */
359 rte_pdump_uninit(void)
361 rte_mp_action_unregister(PDUMP_MP);
/*
 * Validate the client-supplied capture ring and mempool: both must be
 * non-NULL and both must be multi-producer/multi-consumer, since the
 * capture path may be driven from multiple lcores/processes at once.
 * NOTE(review): the two concatenated log strings below render as
 * "...pdump,must have..." -- a space is missing at the join; fixable
 * once the full body is in view (error-return lines are elided here).
 */
367 pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
369 if (ring == NULL || mp == NULL) {
370 PDUMP_LOG(ERR, "NULL ring or mempool\n");
/* mempool must allow MP put and MC get */
374 if (mp->flags & MEMPOOL_F_SP_PUT || mp->flags & MEMPOOL_F_SC_GET) {
376 "mempool with SP or SC set not valid for pdump,"
377 "must have MP and MC set\n");
/* ring must likewise be MP/MC */
381 if (rte_ring_is_prod_single(ring) || rte_ring_is_cons_single(ring)) {
383 "ring with SP or SC set is not valid for pdump,"
384 "must have MP and MC set\n");
/* Accept only the three exact flag values RX, TX, or RXTX -- arbitrary
 * bit combinations are rejected (error-return lines elided here). */
393 pdump_validate_flags(uint32_t flags)
395 if (flags != RTE_PDUMP_FLAG_RX && flags != RTE_PDUMP_FLAG_TX &&
396 flags != RTE_PDUMP_FLAG_RXTX) {
398 "invalid flags, should be either rx/tx/rxtx\n");
/*
 * Range-check a port id and resolve it to its device name, written into
 * the caller-provided buffer (callers pass RTE_DEV_NAME_MAX_LEN-sized
 * buffers).  The name is what gets sent in the MP request, so the server
 * can resolve the port in its own process.
 */
407 pdump_validate_port(uint16_t port, char *name)
411 if (port >= RTE_MAX_ETHPORTS) {
412 PDUMP_LOG(ERR, "Invalid port id %u\n", port);
417 ret = rte_eth_dev_get_name_by_port(port, name);
419 PDUMP_LOG(ERR, "port %u to name mapping failed\n",
/*
 * Build an enable/disable request and send it synchronously to the pdump
 * server over the MP channel, waiting at most 5 seconds for the reply.
 * On reply, the server's err_value is propagated through rte_errno; the
 * success/failure return paths are elided in this view.
 */
429 pdump_prepare_client_request(char *device, uint16_t queue,
432 struct rte_ring *ring,
433 struct rte_mempool *mp,
437 struct rte_mp_msg mp_req, *mp_rep;
438 struct rte_mp_reply mp_reply;
/* bound the synchronous round-trip to 5 seconds */
439 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
/* the request is serialized directly into the MP message payload */
440 struct pdump_request *req = (struct pdump_request *)mp_req.param;
441 struct pdump_response *resp;
446 if ((operation & ENABLE) != 0) {
447 strlcpy(req->data.en_v1.device, device,
448 sizeof(req->data.en_v1.device));
449 req->data.en_v1.queue = queue;
450 req->data.en_v1.ring = ring;
451 req->data.en_v1.mp = mp;
452 req->data.en_v1.filter = filter;
/* DISABLE variant carries only the device/queue; no capture targets */
454 strlcpy(req->data.dis_v1.device, device,
455 sizeof(req->data.dis_v1.device));
456 req->data.dis_v1.queue = queue;
457 req->data.dis_v1.ring = NULL;
458 req->data.dis_v1.mp = NULL;
459 req->data.dis_v1.filter = NULL;
462 strlcpy(mp_req.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
463 mp_req.len_param = sizeof(*req);
465 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0) {
466 mp_rep = &mp_reply.msgs[0];
467 resp = (struct pdump_response *)mp_rep->param;
/* surface the server-side result to the caller via rte_errno */
468 rte_errno = resp->err_value;
469 if (!resp->err_value)
476 "client request for pdump enable/disable failed\n");
/*
 * Public API: enable packet capture on a port/queue.  Validates the port
 * (resolving it to a device name), the capture ring/mempool, and the
 * flags, then sends an ENABLE request to the pdump server.  Intermediate
 * error-return lines are elided in this view.
 */
481 rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
482 struct rte_ring *ring,
483 struct rte_mempool *mp,
487 char name[RTE_DEV_NAME_MAX_LEN];
489 ret = pdump_validate_port(port, name);
492 ret = pdump_validate_ring_mp(ring, mp);
495 ret = pdump_validate_flags(flags);
499 ret = pdump_prepare_client_request(name, queue, flags,
500 ENABLE, ring, mp, filter);
/*
 * Public API: enable packet capture by device name rather than port id.
 * Skips the port lookup of rte_pdump_enable() and passes the device id
 * straight through to the server.  Intermediate error-return lines are
 * elided in this view.
 */
506 rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue,
508 struct rte_ring *ring,
509 struct rte_mempool *mp,
514 ret = pdump_validate_ring_mp(ring, mp);
517 ret = pdump_validate_flags(flags);
521 ret = pdump_prepare_client_request(device_id, queue, flags,
522 ENABLE, ring, mp, filter);
/*
 * Public API: disable packet capture on a port/queue.  Validates port and
 * flags, then sends a DISABLE request (no ring/mp/filter needed).
 * Intermediate error-return lines are elided in this view.
 */
528 rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags)
531 char name[RTE_DEV_NAME_MAX_LEN];
533 ret = pdump_validate_port(port, name);
536 ret = pdump_validate_flags(flags);
540 ret = pdump_prepare_client_request(name, queue, flags,
541 DISABLE, NULL, NULL, NULL);
/*
 * Public API: disable packet capture by device name rather than port id.
 * Validates flags and sends a DISABLE request directly.  Remaining
 * parameters and error-return lines are elided in this view.
 */
547 rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
552 ret = pdump_validate_flags(flags);
556 ret = pdump_prepare_client_request(device_id, queue, flags,
557 DISABLE, NULL, NULL, NULL);