/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_malloc.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_common_os.h"

/* Destroy Event Queue Notification Channel. */
static void
mlx5_txpp_destroy_eqn(struct mlx5_dev_ctx_shared *sh)
{
	if (sh->txpp.echan) {
		mlx5_glue->devx_destroy_event_channel(sh->txpp.echan);
		sh->txpp.echan = NULL;
	}
	sh->txpp.eqn = 0;
}

/* Create Event Queue Notification Channel. */
static int
mlx5_txpp_create_eqn(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t lcore;

	MLX5_ASSERT(!sh->txpp.echan);
	lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
	if (mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->txpp.eqn)) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
		sh->txpp.eqn = 0;
		return -rte_errno;
	}
	sh->txpp.echan = mlx5_glue->devx_create_event_channel(sh->ctx,
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!sh->txpp.echan) {
		rte_errno = errno;
		sh->txpp.eqn = 0;
		DRV_LOG(ERR, "Failed to create event channel %d.",
			rte_errno);
		return -rte_errno;
	}
	return 0;
}
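
/*
 * The Rearm Queue CQ is bound to the EQ queried above; its completion
 * events arrive through the DevX event channel and only serve as a wakeup
 * signal for the rearm interrupt handler (event data is omitted on purpose).
 */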

static void
mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
{
	if (wq->sq)
		claim_zero(mlx5_devx_cmd_destroy(wq->sq));
	if (wq->sq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
	if (wq->sq_buf)
		rte_free((void *)(uintptr_t)wq->sq_buf);
	if (wq->cq)
		claim_zero(mlx5_devx_cmd_destroy(wq->cq));
	if (wq->cq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));
	if (wq->cq_buf)
		rte_free((void *)(uintptr_t)wq->cq_buf);
	memset(wq, 0, sizeof(*wq));
}

static void
mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;

	mlx5_txpp_destroy_send_queue(wq);
}

static void
mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;

	mlx5_txpp_destroy_send_queue(wq);
}

static void
mlx5_txpp_fill_cqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
	uint32_t i;

	for (i = 0; i < MLX5_TXPP_REARM_CQ_SIZE; i++) {
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
		++cqe;
	}
}
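
/*
 * All Rearm Queue CQEs start in the invalid state with the ownership bit
 * set, so stale ring entries cannot be mistaken for valid completions
 * before the hardware writes them for the first time.
 */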

static void
mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
	uint32_t i;

	for (i = 0; i < wq->sq_size; i += 2) {
		struct mlx5_wqe_cseg *cs;
		struct mlx5_wqe_qseg *qs;
		uint32_t index;

		/* Build SEND_EN request with slave WQE index. */
		cs = &wqe[i + 0].cseg;
		cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
		cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
		cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
			((1 << MLX5_WQ_INDEX_WIDTH) - 1);
		qs->max_index = rte_cpu_to_be_32(index);
		qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.sq->id);
		/* Build WAIT request with slave CQE index. */
		cs = &wqe[i + 1].cseg;
		cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
		cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
		cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
			((1 << MLX5_CQ_INDEX_WIDTH) - 1);
		qs->max_index = rte_cpu_to_be_32(index);
		qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.cq->id);
	}
}
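
/*
 * The Rearm Queue is built from SEND_EN/WAIT WQE pairs:
 * - SEND_EN enables the slave Clock Queue to send WQEs up to the index
 *   (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM);
 * - WAIT stalls until the Clock Queue CQ reports the CQE with index
 *   (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2).
 * Indices are masked to MLX5_WQ_INDEX_WIDTH/MLX5_CQ_INDEX_WIDTH bits to
 * follow the hardware index wrap-around in the cyclic queues.
 */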

/* Creates the Rearm Queue to fire the requests to Clock Queue in realtime. */
static int
mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	size_t page_size = sysconf(_SC_PAGESIZE);
	uint32_t umem_size, umem_dbrec;
	int ret;

	/* Allocate memory buffer for CQEs and doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
					page_size, sh->numa_node);
	if (!wq->cq_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
		return -ENOMEM;
	}
	/* Register allocated buffer in user space with DevX. */
	wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
					       (void *)(uintptr_t)wq->cq_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!wq->cq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
		goto error;
	}
	/* Create completion queue object for Rearm Queue. */
	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
	cq_attr.uar_page_id = sh->tx_uar->page_id;
	cq_attr.eqn = sh->txpp.eqn;
	cq_attr.q_umem_valid = 1;
	cq_attr.q_umem_offset = 0;
	cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
	cq_attr.db_umem_valid = 1;
	cq_attr.db_umem_offset = umem_dbrec;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
	cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_REARM_CQ_SIZE);
	cq_attr.log_page_size = rte_log2_u32(page_size);
	wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
	if (!wq->cq) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
		goto error;
	}
	wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
	wq->cq_ci = 0;
	wq->arm_sn = 0;
	/* Mark all CQEs initially as invalid. */
	mlx5_txpp_fill_cqe_rearm_queue(sh);
	/*
	 * Allocate memory buffer for Send Queue WQEs.
	 * There should be no WQE leftovers in the cyclic queue.
	 */
	wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
	umem_size = MLX5_WQE_SIZE * wq->sq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
					page_size, sh->numa_node);
	if (!wq->sq_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
		rte_errno = ENOMEM;
		goto error;
	}
	/* Register allocated buffer in user space with DevX. */
	wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
					       (void *)(uintptr_t)wq->sq_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!wq->sq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
		goto error;
	}
	/* Create send queue object for Rearm Queue. */
	sq_attr.state = MLX5_SQC_STATE_RST;
	sq_attr.tis_lst_sz = 1;
	sq_attr.tis_num = sh->tis->id;
	sq_attr.cqn = wq->cq->id;
	sq_attr.cd_master = 1;
	sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	sq_attr.wq_attr.pd = sh->pdn;
	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
	sq_attr.wq_attr.dbr_umem_valid = 1;
	sq_attr.wq_attr.dbr_addr = umem_dbrec;
	sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
	sq_attr.wq_attr.wq_umem_valid = 1;
	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
	sq_attr.wq_attr.wq_umem_offset = 0;
	wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
	if (!wq->sq) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
		goto error;
	}
	wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
				   MLX5_SND_DBR * sizeof(uint32_t));
	/* Build the WQEs in the Send Queue before moving to the Ready state. */
	mlx5_txpp_fill_wqe_rearm_queue(sh);
	/* Change queue state to ready. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to set SQ ready state Rearm Queue.");
		goto error;
	}
	return 0;
error:
	ret = -rte_errno;
	mlx5_txpp_destroy_rearm_queue(sh);
	rte_errno = -ret;
	return ret;
}
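
/*
 * Both queues use the same memory layout: the CQE/WQE ring is allocated
 * together with its doorbell record in a single umem, the record placed
 * at the MLX5_DBR_SIZE-aligned offset right after the ring and referenced
 * through db_umem_offset/dbr_addr respectively.
 */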

static void
mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
	struct mlx5_wqe_cseg *cs = &wqe->cseg;
	uint32_t wqe_size, opcode, i;
	uint8_t *dst;

	/* For test purposes fill the WQ with SEND inline packet. */
	if (sh->txpp.test) {
		wqe_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
				     MLX5_WQE_CSEG_SIZE +
				     2 * MLX5_WQE_ESEG_SIZE -
				     MLX5_ESEG_MIN_INLINE_SIZE,
				     MLX5_WSEG_SIZE);
		opcode = MLX5_OPCODE_SEND;
	} else {
		wqe_size = MLX5_WSEG_SIZE;
		opcode = MLX5_OPCODE_NOP;
	}
	cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
	cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) |
				     (wqe_size / MLX5_WSEG_SIZE));
	cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
	cs->misc = RTE_BE32(0);
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);
	if (sh->txpp.test) {
		struct mlx5_wqe_eseg *es = &wqe->eseg;
		struct rte_ether_hdr *eth_hdr;
		struct rte_ipv4_hdr *ip_hdr;
		struct rte_udp_hdr *udp_hdr;

		/* Build the inline test packet pattern. */
		MLX5_ASSERT(wqe_size <= MLX5_WQE_SIZE_MAX);
		MLX5_ASSERT(MLX5_TXPP_TEST_PKT_SIZE >=
			    (sizeof(struct rte_ether_hdr) +
			     sizeof(struct rte_ipv4_hdr)));
		es->flags = 0;
		es->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		es->swp_offs = 0;
		es->metadata = 0;
		es->swp_flags = 0;
		es->mss = 0;
		es->inline_hdr_sz = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE);
		/* Build test packet L2 header (Ethernet). */
		dst = (uint8_t *)&es->inline_data;
		eth_hdr = (struct rte_ether_hdr *)dst;
		rte_eth_random_addr(&eth_hdr->d_addr.addr_bytes[0]);
		rte_eth_random_addr(&eth_hdr->s_addr.addr_bytes[0]);
		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		/* Build test packet L3 header (IP v4). */
		dst += sizeof(struct rte_ether_hdr);
		ip_hdr = (struct rte_ipv4_hdr *)dst;
		ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
		ip_hdr->type_of_service = 0;
		ip_hdr->fragment_offset = 0;
		ip_hdr->time_to_live = 64;
		ip_hdr->next_proto_id = IPPROTO_UDP;
		ip_hdr->packet_id = 0;
		ip_hdr->total_length = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
						sizeof(struct rte_ether_hdr));
		/* Use RFC5735 / RFC2544 reserved network test addresses. */
		ip_hdr->src_addr = RTE_BE32((198U << 24) | (18 << 16) |
					    (0 << 8) | 1);
		ip_hdr->dst_addr = RTE_BE32((198U << 24) | (18 << 16) |
					    (0 << 8) | 2);
		if (MLX5_TXPP_TEST_PKT_SIZE <
		    (sizeof(struct rte_ether_hdr) +
		     sizeof(struct rte_ipv4_hdr) +
		     sizeof(struct rte_udp_hdr)))
			goto wcopy;
		/* Build test packet L4 header (UDP). */
		dst += sizeof(struct rte_ipv4_hdr);
		udp_hdr = (struct rte_udp_hdr *)dst;
		udp_hdr->src_port = RTE_BE16(9); /* RFC863 Discard. */
		udp_hdr->dst_port = RTE_BE16(9);
		udp_hdr->dgram_len = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
					      sizeof(struct rte_ether_hdr) -
					      sizeof(struct rte_ipv4_hdr));
		udp_hdr->dgram_cksum = 0;
		/* Fill the test packet data. */
		dst += sizeof(struct rte_udp_hdr);
		for (i = sizeof(struct rte_ether_hdr) +
			 sizeof(struct rte_ipv4_hdr) +
			 sizeof(struct rte_udp_hdr);
		     i < MLX5_TXPP_TEST_PKT_SIZE; i++)
			*dst++ = (uint8_t)(i & 0xFF);
	}
wcopy:
	/* Duplicate the pattern to the next WQEs. */
	dst = (uint8_t *)(uintptr_t)wq->sq_buf;
	for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
		dst += wqe_size;
		rte_memcpy(dst, (void *)(uintptr_t)wq->sq_buf, wqe_size);
	}
}
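
/*
 * In test mode the Clock Queue carries real SEND WQEs with an inline UDP
 * packet (RFC2544 reserved test addresses, RFC863 discard port), so the
 * paced traffic can be observed on the wire. In normal mode only NOP WQEs
 * are posted. Either way a single WQE pattern is built and then replicated
 * over all MLX5_TXPP_CLKQ_SIZE entries of the cyclic queue.
 */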

/* Creates the Clock Queue for packet pacing, returns zero on success. */
static int
mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	size_t page_size = sysconf(_SC_PAGESIZE);
	uint32_t umem_size, umem_dbrec;
	int ret;

	/* Allocate memory buffer for CQEs and doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
					page_size, sh->numa_node);
	if (!wq->cq_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
		return -ENOMEM;
	}
	/* Register allocated buffer in user space with DevX. */
	wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
					       (void *)(uintptr_t)wq->cq_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!wq->cq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
		goto error;
	}
	/* Create completion queue object for Clock Queue. */
	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
	cq_attr.use_first_only = 1;
	cq_attr.overrun_ignore = 1;
	cq_attr.uar_page_id = sh->tx_uar->page_id;
	cq_attr.eqn = sh->txpp.eqn;
	cq_attr.q_umem_valid = 1;
	cq_attr.q_umem_offset = 0;
	cq_attr.q_umem_id = wq->cq_umem->umem_id;
	cq_attr.db_umem_valid = 1;
	cq_attr.db_umem_offset = umem_dbrec;
	cq_attr.db_umem_id = wq->cq_umem->umem_id;
	cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);
	cq_attr.log_page_size = rte_log2_u32(page_size);
	wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
	if (!wq->cq) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
		goto error;
	}
	wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
	wq->cq_ci = 0;
	/* Allocate memory buffer for Send Queue WQEs. */
	if (sh->txpp.test) {
		wq->sq_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
					MLX5_WQE_CSEG_SIZE +
					2 * MLX5_WQE_ESEG_SIZE -
					MLX5_ESEG_MIN_INLINE_SIZE,
					MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
		wq->sq_size *= MLX5_TXPP_CLKQ_SIZE;
	} else {
		wq->sq_size = MLX5_TXPP_CLKQ_SIZE;
	}
	/* There should not be WQE leftovers in the cyclic queue. */
	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
	umem_size = MLX5_WQE_SIZE * wq->sq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
					page_size, sh->numa_node);
	if (!wq->sq_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
		rte_errno = ENOMEM;
		goto error;
	}
	/* Register allocated buffer in user space with DevX. */
	wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
					       (void *)(uintptr_t)wq->sq_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!wq->sq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
		goto error;
	}
	/* Create send queue object for Clock Queue. */
	if (sh->txpp.test) {
		sq_attr.tis_lst_sz = 1;
		sq_attr.tis_num = sh->tis->id;
		sq_attr.non_wire = 0;
		sq_attr.static_sq_wq = 1;
	} else {
		sq_attr.non_wire = 1;
		sq_attr.static_sq_wq = 1;
	}
	sq_attr.state = MLX5_SQC_STATE_RST;
	sq_attr.cqn = wq->cq->id;
	sq_attr.wq_attr.cd_slave = 1;
	sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	sq_attr.wq_attr.pd = sh->pdn;
	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
	sq_attr.wq_attr.dbr_umem_valid = 1;
	sq_attr.wq_attr.dbr_addr = umem_dbrec;
	sq_attr.wq_attr.dbr_umem_id = wq->sq_umem->umem_id;
	sq_attr.wq_attr.wq_umem_valid = 1;
	sq_attr.wq_attr.wq_umem_id = wq->sq_umem->umem_id;
	/* umem_offset must be zero for static_sq_wq queue. */
	sq_attr.wq_attr.wq_umem_offset = 0;
	wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
	if (!wq->sq) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
		goto error;
	}
	wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
				   MLX5_SND_DBR * sizeof(uint32_t));
	/* Build the WQEs in the Send Queue before moving to the Ready state. */
	mlx5_txpp_fill_wqe_clock_queue(sh);
	/* Change queue state to ready. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	wq->sq_ci = 0;
	ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to set SQ ready state Clock Queue.");
		goto error;
	}
	return 0;
error:
	ret = -rte_errno;
	mlx5_txpp_destroy_clock_queue(sh);
	rte_errno = -ret;
	return ret;
}
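
/*
 * The Clock Queue CQ is created with use_first_only and overrun_ignore
 * set: the queue runs cyclically forever, only the most recent completion
 * information is of interest, and overruns of the never-drained CQ are
 * expected and must not be treated as errors.
 */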

/*
 * The routine initializes the packet pacing infrastructure:
 * - allocates PP context
 * - Clock CQ/SQ
 * - Rearm CQ/SQ
 * - attaches rearm interrupt handler
 *
 * Returns 0 on success, negative otherwise
 */
static int
mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
{
	int tx_pp = priv->config.tx_pp;
	int ret;

	/* Store the requested pacing parameters. */
	sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
	sh->txpp.test = !!(tx_pp < 0);
	sh->txpp.skew = priv->config.tx_skew;
	sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
	ret = mlx5_txpp_create_eqn(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_create_clock_queue(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_create_rearm_queue(sh);
	if (ret)
		goto exit;
exit:
	if (ret) {
		mlx5_txpp_destroy_rearm_queue(sh);
		mlx5_txpp_destroy_clock_queue(sh);
		mlx5_txpp_destroy_eqn(sh);
		sh->txpp.tick = 0;
		sh->txpp.test = 0;
		sh->txpp.skew = 0;
	}
	return ret;
}
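
/*
 * The tx_pp devarg is interpreted above: its absolute value becomes the
 * scheduling tick, and a negative value additionally selects the test
 * mode in which the Clock Queue sends the dedicated test packets.
 */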

/*
 * The routine destroys the packet pacing infrastructure:
 * - detaches rearm interrupt handler
 * - Rearm CQ/SQ
 * - Clock CQ/SQ
 * - PP context
 */
static void
mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_txpp_destroy_rearm_queue(sh);
	mlx5_txpp_destroy_clock_queue(sh);
	mlx5_txpp_destroy_eqn(sh);
	sh->txpp.tick = 0;
	sh->txpp.test = 0;
	sh->txpp.skew = 0;
}

/**
 * Creates and starts packet pacing infrastructure on specified device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txpp_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int err = 0;
	int ret;

	if (!priv->config.tx_pp) {
		/* Packet pacing is not requested for the device. */
		MLX5_ASSERT(priv->txpp_en == 0);
		return 0;
	}
	if (priv->txpp_en) {
		/* Packet pacing is already enabled for the device. */
		MLX5_ASSERT(sh->txpp.refcnt);
		return 0;
	}
	if (priv->config.tx_pp > 0) {
		ret = rte_mbuf_dynflag_lookup
				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
		if (ret < 0)
			return 0;
	}
	ret = pthread_mutex_lock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	if (sh->txpp.refcnt) {
		priv->txpp_en = 1;
		++sh->txpp.refcnt;
	} else {
		err = mlx5_txpp_create(sh, priv);
		if (!err) {
			MLX5_ASSERT(sh->txpp.tick);
			priv->txpp_en = 1;
			sh->txpp.refcnt = 1;
		} else {
			rte_errno = -err;
		}
	}
	ret = pthread_mutex_unlock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	return err;
}
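
/*
 * When pacing is requested with a positive tx_pp value the datapath relies
 * on the mbuf dynamic Tx timestamp flag; if the application has not
 * registered it, there is nothing to schedule and pacing start is silently
 * skipped (see the dynflag lookup above).
 */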

/**
 * Stops and destroys packet pacing infrastructure on specified device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
void
mlx5_txpp_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int ret;

	if (!priv->txpp_en) {
		/* Packet pacing is already disabled for the device. */
		return;
	}
	priv->txpp_en = 0;
	ret = pthread_mutex_lock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	MLX5_ASSERT(sh->txpp.refcnt);
	if (!sh->txpp.refcnt || --sh->txpp.refcnt)
		goto exit;
	/* No references any more, do actual destroy. */
	mlx5_txpp_destroy(sh);
exit:
	ret = pthread_mutex_unlock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
}
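
/*
 * The packet pacing objects live in the shared device context, so
 * mlx5_txpp_start()/mlx5_txpp_stop() only manipulate the reference counter
 * under txpp.mutex: the infrastructure is created by the first port that
 * enables pacing and destroyed by the last one that disables it.
 */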