1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2018 Intel Corporation
5 #include <rte_memcpy.h>
7 #include <rte_ethdev.h>
10 #include <rte_errno.h>
11 #include <rte_string_fns.h>
13 #include "rte_pdump.h"
/* Max length of the device (port) name carried in a pdump request. */
15 #define DEVICE_ID_SIZE 64
/* Dedicated log type for this library; default level NOTICE. */
17 RTE_LOG_REGISTER(pdump_logtype, lib.pdump, NOTICE);
19 /* Macro for printing using RTE_LOG */
20 #define PDUMP_LOG(level, fmt, args...) \
21 rte_log(RTE_LOG_ ## level, pdump_logtype, "%s(): " fmt, \
24 /* Used for the multi-process communication */
25 #define PDUMP_MP "mp_pdump"
/* Requested operation; ENABLE/DISABLE enumerators are used throughout
 * this file (remaining enumerators not visible in this listing). */
27 enum pdump_operation {
/* Request sent from a pdump client to the primary-process server over
 * the rte_mp channel.  Carries the target device name plus the ring and
 * mempool into which captured packet copies are delivered.  The enable
 * and disable variants mirror each other (see pdump_prepare_client_request). */
36 struct pdump_request {
/* enable (en_v1) variant: device name, capture ring, mbuf pool */
42 char device[DEVICE_ID_SIZE];
44 struct rte_ring *ring;
45 struct rte_mempool *mp;
/* disable (dis_v1) variant: same layout as en_v1 */
49 char device[DEVICE_ID_SIZE];
51 struct rte_ring *ring;
52 struct rte_mempool *mp;
/* Server reply; ver/res_op/err_value are filled in pdump_server(). */
58 struct pdump_response {
/* Per-port, per-queue capture state for Rx and Tx directions:
 * the destination ring/mempool for packet copies and the ethdev
 * callback handle (kept so the callback can later be removed). */
64 static struct pdump_rxtx_cbs {
65 struct rte_ring *ring;
66 struct rte_mempool *mp;
67 const struct rte_eth_rxtx_callback *cb;
69 } rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
70 tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
/*
 * Duplicate a burst of mbufs and enqueue the copies onto the capture ring.
 * user_params presumably points at the pdump_rxtx_cbs entry for this
 * port/queue, supplying ring and mp (registration call args are elided
 * here — TODO confirm).  Copies that do not fit in the ring are freed.
 */
74 pdump_copy(struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
/* VLA sized by the burst; nb_pkts is bounded by the ethdev burst size. */
79 struct rte_mbuf *dup_bufs[nb_pkts];
80 struct pdump_rxtx_cbs *cbs;
81 struct rte_ring *ring;
82 struct rte_mempool *mp;
/* Deep-copy every packet (full length: offset 0, UINT32_MAX bytes)
 * into the capture mempool; failed copies are skipped (elided branch). */
88 for (i = 0; i < nb_pkts; i++) {
89 p = rte_pktmbuf_copy(pkts[i], mp, 0, UINT32_MAX);
91 dup_bufs[d_pkts++] = p;
/* Single burst enqueue; NULL = no interest in ring free-space count. */
94 ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
95 if (unlikely(ring_enq < d_pkts)) {
97 "only %d of packets enqueued to ring\n", ring_enq);
/* Free the duplicates the ring had no room for (indices ring_enq..d_pkts-1). */
99 rte_pktmbuf_free(dup_bufs[ring_enq]);
100 } while (++ring_enq < d_pkts);
/* Rx-side ethdev callback: mirror the received burst into the capture
 * ring via pdump_copy().  Port/queue identity is unused here because
 * user_params already carries the per-queue state. */
105 pdump_rx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
106 struct rte_mbuf **pkts, uint16_t nb_pkts,
107 uint16_t max_pkts __rte_unused,
110 pdump_copy(pkts, nb_pkts, user_params);
/* Tx-side ethdev callback: mirror the transmit burst into the capture
 * ring via pdump_copy(); counterpart of pdump_rx() above. */
115 pdump_tx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
116 struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
118 pdump_copy(pkts, nb_pkts, user_params);
/*
 * Add or remove the Rx capture callback on [queue, end_q) of @port.
 * queue == RTE_PDUMP_ALL_QUEUES starts the walk at qid 0; otherwise a
 * single queue is touched (end_q is computed by the caller).
 * Returns 0 on success, negative errno on failure (error paths elided).
 */
123 pdump_register_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
124 struct rte_ring *ring, struct rte_mempool *mp,
128 struct pdump_rxtx_cbs *cbs = NULL;
130 qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
131 for (; qid < end_q; qid++) {
132 cbs = &rx_cbs[port][qid];
/* NOTE(review): cbs is the address of an array slot, so "cbs &&" is
 * always true — the real already-enabled test is on cbs->cb (elided). */
133 if (cbs && operation == ENABLE) {
136 "failed to add rx callback for port=%d "
137 "and queue=%d, callback already exists\n",
/* Record ring/mp in the slot (elided), then hook pdump_rx as the
 * FIRST Rx callback so capture sees packets before other callbacks. */
143 cbs->cb = rte_eth_add_first_rx_callback(port, qid,
145 if (cbs->cb == NULL) {
147 "failed to add rx callback, errno=%d\n",
152 if (cbs && operation == DISABLE) {
/* Disabling a queue that was never enabled is an error. */
155 if (cbs->cb == NULL) {
157 "failed to delete non existing rx "
158 "callback for port=%d and queue=%d\n",
162 ret = rte_eth_remove_rx_callback(port, qid, cbs->cb);
165 "failed to remove rx callback, errno=%d\n",
/*
 * Tx-side twin of pdump_register_rx_callbacks(): add or remove the
 * pdump_tx capture callback on [queue, end_q) of @port.
 * Returns 0 on success, negative errno on failure (error paths elided).
 */
177 pdump_register_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
178 struct rte_ring *ring, struct rte_mempool *mp,
183 struct pdump_rxtx_cbs *cbs = NULL;
185 qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
186 for (; qid < end_q; qid++) {
187 cbs = &tx_cbs[port][qid];
/* NOTE(review): "cbs &&" is always true (address of array slot);
 * the meaningful already-enabled check is on cbs->cb (elided). */
188 if (cbs && operation == ENABLE) {
191 "failed to add tx callback for port=%d "
192 "and queue=%d, callback already exists\n",
/* Hook pdump_tx on the Tx path; the slot keeps the handle for removal. */
198 cbs->cb = rte_eth_add_tx_callback(port, qid, pdump_tx,
200 if (cbs->cb == NULL) {
202 "failed to add tx callback, errno=%d\n",
207 if (cbs && operation == DISABLE) {
210 if (cbs->cb == NULL) {
212 "failed to delete non existing tx "
213 "callback for port=%d and queue=%d\n",
217 ret = rte_eth_remove_tx_callback(port, qid, cbs->cb);
220 "failed to remove tx callback, errno=%d\n",
/*
 * Server-side handler for a client request: resolve the device name to
 * a port, validate queue counts when capturing on all queues, then
 * register/unregister the Rx and/or Tx callbacks as requested.
 * Returns 0 on success, negative errno otherwise (returns elided).
 */
232 set_pdump_rxtx_cbs(const struct pdump_request *p)
234 uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
239 struct rte_ring *ring;
240 struct rte_mempool *mp;
/* Pick the request variant: en_v1 for ENABLE, dis_v1 otherwise. */
244 if (operation == ENABLE) {
245 ret = rte_eth_dev_get_port_by_name(p->data.en_v1.device,
249 "failed to get port id for device id=%s\n",
250 p->data.en_v1.device);
253 queue = p->data.en_v1.queue;
254 ring = p->data.en_v1.ring;
255 mp = p->data.en_v1.mp;
257 ret = rte_eth_dev_get_port_by_name(p->data.dis_v1.device,
261 "failed to get port id for device id=%s\n",
262 p->data.dis_v1.device);
265 queue = p->data.dis_v1.queue;
266 ring = p->data.dis_v1.ring;
267 mp = p->data.dis_v1.mp;
270 /* validation if packet capture is for all queues */
271 if (queue == RTE_PDUMP_ALL_QUEUES) {
272 struct rte_eth_dev_info dev_info;
274 ret = rte_eth_dev_info_get(port, &dev_info);
277 "Error during getting device (port %u) info: %s\n",
278 port, strerror(-ret));
/* A direction with zero configured queues cannot be captured. */
282 nb_rx_q = dev_info.nb_rx_queues;
283 nb_tx_q = dev_info.nb_tx_queues;
284 if (nb_rx_q == 0 && flags & RTE_PDUMP_FLAG_RX) {
286 "number of rx queues cannot be 0\n");
289 if (nb_tx_q == 0 && flags & RTE_PDUMP_FLAG_TX) {
291 "number of tx queues cannot be 0\n");
294 if ((nb_tx_q == 0 || nb_rx_q == 0) &&
295 flags == RTE_PDUMP_FLAG_RXTX) {
297 "both tx&rx queues must be non zero\n");
302 /* register RX callback */
/* end_q is exclusive: all queues -> nb_rx_q, single queue -> queue+1. */
303 if (flags & RTE_PDUMP_FLAG_RX) {
304 end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
305 ret = pdump_register_rx_callbacks(end_q, port, queue, ring, mp,
311 /* register TX callback */
312 if (flags & RTE_PDUMP_FLAG_TX) {
313 end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
314 ret = pdump_register_tx_callbacks(end_q, port, queue, ring, mp,
/*
 * rte_mp action handler (registered under PDUMP_MP): validate the
 * incoming request, apply it via set_pdump_rxtx_cbs(), and send a
 * pdump_response back to the requesting peer.  Reply is sent on both
 * the success and the bad-length path (flow partially elided).
 */
324 pdump_server(const struct rte_mp_msg *mp_msg, const void *peer)
326 struct rte_mp_msg mp_resp;
327 const struct pdump_request *cli_req;
/* Build the response directly inside the reply message's param area. */
328 struct pdump_response *resp = (struct pdump_response *)&mp_resp.param;
330 /* recv client requests */
331 if (mp_msg->len_param != sizeof(*cli_req)) {
332 PDUMP_LOG(ERR, "failed to recv from client\n");
333 resp->err_value = -EINVAL;
334 cli_req = (const struct pdump_request *)mp_msg->param;
/* Echo version/op so the client can match the reply to its request;
 * err_value carries the handler's result (0 or negative errno). */
336 resp->ver = cli_req->ver;
337 resp->res_op = cli_req->op;
338 resp->err_value = set_pdump_rxtx_cbs(cli_req);
341 strlcpy(mp_resp.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
342 mp_resp.len_param = sizeof(*resp);
344 if (rte_mp_reply(&mp_resp, peer) < 0) {
345 PDUMP_LOG(ERR, "failed to send to client:%s\n",
346 strerror(rte_errno));
/* (rte_pdump_init body) Register the pdump server handler on the
 * multi-process channel.  ENOTSUP (no IPC available, e.g. no shared
 * memory) is tolerated — presumably the init still succeeds then;
 * the return statement is elided here, TODO confirm. */
356 int ret = rte_mp_action_register(PDUMP_MP, pdump_server);
357 if (ret && rte_errno != ENOTSUP)
/* Tear down the pdump service: unregister the PDUMP_MP action so no
 * further client requests are handled. */
363 rte_pdump_uninit(void)
365 rte_mp_action_unregister(PDUMP_MP);
/*
 * Validate the client-supplied capture ring and mempool: both must be
 * non-NULL and multi-producer/multi-consumer, because ethdev callbacks
 * on several lcores may produce into them concurrently.
 * (Error-return statements are elided in this listing.)
 */
371 pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
373 if (ring == NULL || mp == NULL) {
374 PDUMP_LOG(ERR, "NULL ring or mempool\n");
/* Reject single-producer-put or single-consumer-get mempools. */
378 if (mp->flags & MEMPOOL_F_SP_PUT || mp->flags & MEMPOOL_F_SC_GET) {
379 PDUMP_LOG(ERR, "mempool with either SP or SC settings"
380 " is not valid for pdump, should have MP and MC settings\n");
/* Reject single-producer or single-consumer rings for the same reason. */
384 if (rte_ring_is_prod_single(ring) || rte_ring_is_cons_single(ring)) {
385 PDUMP_LOG(ERR, "ring with either SP or SC settings"
386 " is not valid for pdump, should have MP and MC settings\n");
/* Accept exactly RTE_PDUMP_FLAG_RX, _TX, or _RXTX; any other flag
 * combination is rejected (error return elided). */
395 pdump_validate_flags(uint32_t flags)
397 if (flags != RTE_PDUMP_FLAG_RX && flags != RTE_PDUMP_FLAG_TX &&
398 flags != RTE_PDUMP_FLAG_RXTX) {
400 "invalid flags, should be either rx/tx/rxtx\n");
/*
 * Check @port is in range and resolve it to its device name, written
 * into @name (caller provides a DEVICE_ID_SIZE buffer — see callers).
 * Returns 0 on success, negative on failure (returns elided).
 */
409 pdump_validate_port(uint16_t port, char *name)
413 if (port >= RTE_MAX_ETHPORTS) {
414 PDUMP_LOG(ERR, "Invalid port id %u\n", port);
419 ret = rte_eth_dev_get_name_by_port(port, name);
421 PDUMP_LOG(ERR, "port %u to name mapping failed\n",
/*
 * Build a pdump_request for @device/@queue and send it synchronously to
 * the primary-process server over the rte_mp channel (5 second timeout).
 * The server's err_value is propagated through rte_errno; a zero
 * err_value indicates success (success return elided).
 */
431 pdump_prepare_client_request(char *device, uint16_t queue,
434 struct rte_ring *ring,
435 struct rte_mempool *mp,
439 struct rte_mp_msg mp_req, *mp_rep;
440 struct rte_mp_reply mp_reply;
/* Synchronous request timeout: 5 seconds. */
441 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
/* The request payload lives directly in the mp message's param area. */
442 struct pdump_request *req = (struct pdump_request *)mp_req.param;
443 struct pdump_response *resp;
/* ENABLE fills the en_v1 variant with the capture resources... */
448 if ((operation & ENABLE) != 0) {
449 strlcpy(req->data.en_v1.device, device,
450 sizeof(req->data.en_v1.device));
451 req->data.en_v1.queue = queue;
452 req->data.en_v1.ring = ring;
453 req->data.en_v1.mp = mp;
454 req->data.en_v1.filter = filter;
/* ...DISABLE only needs the device/queue; resources are NULLed. */
456 strlcpy(req->data.dis_v1.device, device,
457 sizeof(req->data.dis_v1.device));
458 req->data.dis_v1.queue = queue;
459 req->data.dis_v1.ring = NULL;
460 req->data.dis_v1.mp = NULL;
461 req->data.dis_v1.filter = NULL;
464 strlcpy(mp_req.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
465 mp_req.len_param = sizeof(*req);
467 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0) {
/* Only the first reply message is examined. */
468 mp_rep = &mp_reply.msgs[0];
469 resp = (struct pdump_response *)mp_rep->param;
470 rte_errno = resp->err_value;
471 if (!resp->err_value)
478 "client request for pdump enable/disable failed\n");
/*
 * Public API: enable packet capture on @port/@queue for @flags
 * direction(s).  Validates port, ring/mempool and flags, then asks the
 * server to install the callbacks (early error returns elided).
 */
483 rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
484 struct rte_ring *ring,
485 struct rte_mempool *mp,
/* Port is translated to its device name for the request payload. */
490 char name[DEVICE_ID_SIZE];
492 ret = pdump_validate_port(port, name);
495 ret = pdump_validate_ring_mp(ring, mp);
498 ret = pdump_validate_flags(flags);
502 ret = pdump_prepare_client_request(name, queue, flags,
503 ENABLE, ring, mp, filter);
/*
 * Public API: same as rte_pdump_enable() but the target is identified
 * by its device name directly, so no port-to-name validation/lookup is
 * needed (early error returns elided).
 */
509 rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue,
511 struct rte_ring *ring,
512 struct rte_mempool *mp,
517 ret = pdump_validate_ring_mp(ring, mp);
520 ret = pdump_validate_flags(flags);
524 ret = pdump_prepare_client_request(device_id, queue, flags,
525 ENABLE, ring, mp, filter);
/*
 * Public API: disable packet capture on @port/@queue for @flags
 * direction(s).  No ring/mempool needed — the request carries NULLs
 * (early error returns elided).
 */
531 rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags)
534 char name[DEVICE_ID_SIZE];
536 ret = pdump_validate_port(port, name);
539 ret = pdump_validate_flags(flags);
543 ret = pdump_prepare_client_request(name, queue, flags,
544 DISABLE, NULL, NULL, NULL);
/*
 * Public API: disable capture with the target identified by device name
 * directly; mirrors rte_pdump_disable() without the port lookup
 * (remainder of the function is beyond this view).
 */
550 rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
555 ret = pdump_validate_flags(flags);
559 ret = pdump_prepare_client_request(device_id, queue, flags,
560 DISABLE, NULL, NULL, NULL);