/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include "mlx5_rxtx.h"
#include "mlx5_common_os.h"
/* Destroy Event Queue Notification Channel. */
mlx5_txpp_destroy_eqn(struct mlx5_dev_ctx_shared *sh)
mlx5_glue->devx_destroy_event_channel(sh->txpp.echan);
sh->txpp.echan = NULL;
/* Create Event Queue Notification Channel. */
mlx5_txpp_create_eqn(struct mlx5_dev_ctx_shared *sh)
MLX5_ASSERT(!sh->txpp.echan);
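/*
 * Resolve the CPU of the calling lcore and query the matching EQ number,
 * presumably so completion events are delivered to a local EQ vector.
 */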
lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
if (mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->txpp.eqn)) {
DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
sh->txpp.echan = mlx5_glue->devx_create_event_channel(sh->ctx,
MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
if (!sh->txpp.echan) {
DRV_LOG(ERR, "Failed to create event channel %d.",
mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
mlx5_glue->dv_free_pp(sh->txpp.pp);
/* Allocate Packet Pacing index from kernel via mlx5dv call. */
mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
#ifdef HAVE_MLX5DV_PP_ALLOC
uint32_t pp[MLX5_ST_SZ_DW(set_pp_rate_limit_context)];
MLX5_ASSERT(!sh->txpp.pp);
memset(&pp, 0, sizeof(pp));
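/* The tick period is in nanoseconds, so NS_PER_S / tick gives the pacing rate in packets per second. */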
rate = NS_PER_S / sh->txpp.tick;
if (rate * sh->txpp.tick != NS_PER_S)
DRV_LOG(WARNING, "Packet pacing frequency is not precise.");
len = RTE_MAX(MLX5_TXPP_TEST_PKT_SIZE,
(size_t)RTE_ETHER_MIN_LEN);
MLX5_SET(set_pp_rate_limit_context, &pp,
burst_upper_bound, len);
MLX5_SET(set_pp_rate_limit_context, &pp,
typical_packet_size, len);
/* Convert the rate from packets per second into kilobits per second. */
rate = (rate * len) / (1000ul / CHAR_BIT);
DRV_LOG(INFO, "Packet pacing rate set to %" PRIu64, rate);
MLX5_SET(set_pp_rate_limit_context, &pp, rate_limit, rate);
MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
sh->txpp.pp = mlx5_glue->dv_alloc_pp
(sh->ctx, sizeof(pp), &pp,
MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
if (sh->txpp.pp == NULL) {
DRV_LOG(ERR, "Failed to allocate packet pacing index.");
if (!sh->txpp.pp->index) {
DRV_LOG(ERR, "Zero packet pacing index allocated.");
mlx5_txpp_free_pp_index(sh);
sh->txpp.pp_id = sh->txpp.pp->index;
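/* This dedicated index is later written into the Clock Queue SQ context as packet_pacing_rate_limit_index. */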
DRV_LOG(ERR, "Allocating pacing index is not supported.");
mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
claim_zero(mlx5_devx_cmd_destroy(wq->sq));
claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
rte_free((void *)(uintptr_t)wq->sq_buf);
claim_zero(mlx5_devx_cmd_destroy(wq->cq));
claim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));
rte_free((void *)(uintptr_t)wq->cq_buf);
memset(wq, 0, sizeof(*wq));
mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
mlx5_txpp_destroy_send_queue(wq);
mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
mlx5_txpp_destroy_send_queue(wq);
mlx5_txpp_fill_cqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
for (i = 0; i < MLX5_TXPP_REARM_CQ_SIZE; i++) {
cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
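/*
 * WQEs are built in pairs: a SEND_EN that enables the slave Clock Queue SQ
 * up to a future WQE index, followed by a WAIT that blocks until the
 * Clock Queue CQ reaches the matching CQE index.
 */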
for (i = 0; i < wq->sq_size; i += 2) {
struct mlx5_wqe_cseg *cs;
struct mlx5_wqe_qseg *qs;
/* Build SEND_EN request with slave WQE index. */
cs = &wqe[i + 0].cseg;
cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
MLX5_COMP_MODE_OFFSET);
cs->misc = RTE_BE32(0);
qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
((1 << MLX5_WQ_INDEX_WIDTH) - 1);
qs->max_index = rte_cpu_to_be_32(index);
qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.sq->id);
/* Build WAIT request with slave CQE index. */
cs = &wqe[i + 1].cseg;
cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
MLX5_COMP_MODE_OFFSET);
cs->misc = RTE_BE32(0);
qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
((1 << MLX5_CQ_INDEX_WIDTH) - 1);
qs->max_index = rte_cpu_to_be_32(index);
qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.cq->id);
/* Creates the Rearm Queue to fire requests to the Clock Queue in realtime. */
mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
struct mlx5_devx_create_sq_attr sq_attr = { 0 };
struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
struct mlx5_devx_cq_attr cq_attr = { 0 };
struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
size_t page_size = sysconf(_SC_PAGESIZE);
uint32_t umem_size, umem_dbrec;
/* Allocate memory buffer for CQEs and doorbell record. */
umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
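/* The doorbell record shares the umem with the CQEs and is placed right after them, aligned to MLX5_DBR_SIZE. */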
wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
page_size, sh->numa_node);
DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
/* Register allocated buffer in user space with DevX. */
wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
(void *)(uintptr_t)wq->cq_buf,
IBV_ACCESS_LOCAL_WRITE);
DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
/* Create completion queue object for Rearm Queue. */
cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
cq_attr.uar_page_id = sh->tx_uar->page_id;
cq_attr.eqn = sh->txpp.eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = 0;
cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
cq_attr.db_umem_valid = 1;
cq_attr.db_umem_offset = umem_dbrec;
cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_REARM_CQ_SIZE);
cq_attr.log_page_size = rte_log2_u32(page_size);
wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
/* Mark all CQEs initially as invalid. */
mlx5_txpp_fill_cqe_rearm_queue(sh);
/*
 * Allocate memory buffer for Send Queue WQEs.
 * There should be no WQE leftovers in the cyclic queue.
 */
wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
umem_size = MLX5_WQE_SIZE * wq->sq_size;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
page_size, sh->numa_node);
DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
/* Register allocated buffer in user space with DevX. */
wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
(void *)(uintptr_t)wq->sq_buf,
IBV_ACCESS_LOCAL_WRITE);
DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
/* Create send queue object for Rearm Queue. */
sq_attr.state = MLX5_SQC_STATE_RST;
sq_attr.tis_lst_sz = 1;
sq_attr.tis_num = sh->tis->id;
sq_attr.cqn = wq->cq->id;
sq_attr.cd_master = 1;
sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
sq_attr.wq_attr.pd = sh->pdn;
sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
sq_attr.wq_attr.dbr_umem_valid = 1;
sq_attr.wq_attr.dbr_addr = umem_dbrec;
sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
sq_attr.wq_attr.wq_umem_valid = 1;
sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
sq_attr.wq_attr.wq_umem_offset = 0;
wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
MLX5_SND_DBR * sizeof(uint32_t));
/* Build the WQEs in the Send Queue before moving it to the Ready state. */
mlx5_txpp_fill_wqe_rearm_queue(sh);
/* Change queue state to ready. */
msq_attr.sq_state = MLX5_SQC_STATE_RST;
msq_attr.state = MLX5_SQC_STATE_RDY;
ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
DRV_LOG(ERR, "Failed to set SQ ready state for Rearm Queue.");
mlx5_txpp_destroy_rearm_queue(sh);
mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
struct mlx5_wqe_cseg *cs = &wqe->cseg;
uint32_t wqe_size, opcode, i;
/* For test purposes, fill the WQ with an inline SEND packet. */
wqe_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
2 * MLX5_WQE_ESEG_SIZE -
MLX5_ESEG_MIN_INLINE_SIZE,
opcode = MLX5_OPCODE_SEND;
wqe_size = MLX5_WSEG_SIZE;
opcode = MLX5_OPCODE_NOP;
cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) |
(wqe_size / MLX5_WSEG_SIZE));
cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
cs->misc = RTE_BE32(0);
wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);
struct mlx5_wqe_eseg *es = &wqe->eseg;
struct rte_ether_hdr *eth_hdr;
struct rte_ipv4_hdr *ip_hdr;
struct rte_udp_hdr *udp_hdr;
/* Build the inline test packet pattern. */
MLX5_ASSERT(wqe_size <= MLX5_WQE_SIZE_MAX);
MLX5_ASSERT(MLX5_TXPP_TEST_PKT_SIZE >=
(sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr)));
es->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
es->inline_hdr_sz = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE);
/* Build test packet L2 header (Ethernet). */
dst = (uint8_t *)&es->inline_data;
eth_hdr = (struct rte_ether_hdr *)dst;
rte_eth_random_addr(&eth_hdr->d_addr.addr_bytes[0]);
rte_eth_random_addr(&eth_hdr->s_addr.addr_bytes[0]);
eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
/* Build test packet L3 header (IP v4). */
dst += sizeof(struct rte_ether_hdr);
ip_hdr = (struct rte_ipv4_hdr *)dst;
ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
ip_hdr->type_of_service = 0;
ip_hdr->fragment_offset = 0;
ip_hdr->time_to_live = 64;
ip_hdr->next_proto_id = IPPROTO_UDP;
ip_hdr->packet_id = 0;
ip_hdr->total_length = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
sizeof(struct rte_ether_hdr));
/* use RFC5735 / RFC2544 reserved network test addresses */
ip_hdr->src_addr = RTE_BE32((198U << 24) | (18 << 16) |
ip_hdr->dst_addr = RTE_BE32((198U << 24) | (18 << 16) |
if (MLX5_TXPP_TEST_PKT_SIZE <
(sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
sizeof(struct rte_udp_hdr)))
/* Build test packet L4 header (UDP). */
dst += sizeof(struct rte_ipv4_hdr);
udp_hdr = (struct rte_udp_hdr *)dst;
udp_hdr->src_port = RTE_BE16(9); /* RFC863 Discard. */
udp_hdr->dst_port = RTE_BE16(9);
udp_hdr->dgram_len = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
sizeof(struct rte_ether_hdr) -
sizeof(struct rte_ipv4_hdr));
udp_hdr->dgram_cksum = 0;
/* Fill the test packet data. */
dst += sizeof(struct rte_udp_hdr);
for (i = sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
sizeof(struct rte_udp_hdr);
i < MLX5_TXPP_TEST_PKT_SIZE; i++)
*dst++ = (uint8_t)(i & 0xFF);
/* Duplicate the pattern to the next WQEs. */
dst = (uint8_t *)(uintptr_t)wq->sq_buf;
for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
rte_memcpy(dst, (void *)(uintptr_t)wq->sq_buf, wqe_size);
/* Creates the Clock Queue for packet pacing, returns zero on success. */
mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
struct mlx5_devx_create_sq_attr sq_attr = { 0 };
struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
struct mlx5_devx_cq_attr cq_attr = { 0 };
struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
size_t page_size = sysconf(_SC_PAGESIZE);
uint32_t umem_size, umem_dbrec;
/* Allocate memory buffer for CQEs and doorbell record. */
umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
page_size, sh->numa_node);
DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
/* Register allocated buffer in user space with DevX. */
wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
(void *)(uintptr_t)wq->cq_buf,
IBV_ACCESS_LOCAL_WRITE);
DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
/* Create completion queue object for Clock Queue. */
cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
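/*
 * The Clock Queue CQ keeps overwriting only its first CQE and ignores
 * overruns, presumably so the latest completion can be sampled without
 * any CQ maintenance.
 */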
cq_attr.use_first_only = 1;
cq_attr.overrun_ignore = 1;
cq_attr.uar_page_id = sh->tx_uar->page_id;
cq_attr.eqn = sh->txpp.eqn;
cq_attr.q_umem_valid = 1;
cq_attr.q_umem_offset = 0;
cq_attr.q_umem_id = wq->cq_umem->umem_id;
cq_attr.db_umem_valid = 1;
cq_attr.db_umem_offset = umem_dbrec;
cq_attr.db_umem_id = wq->cq_umem->umem_id;
cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);
cq_attr.log_page_size = rte_log2_u32(page_size);
wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
/* Allocate memory buffer for Send Queue WQEs. */
wq->sq_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
2 * MLX5_WQE_ESEG_SIZE -
MLX5_ESEG_MIN_INLINE_SIZE,
MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
wq->sq_size *= MLX5_TXPP_CLKQ_SIZE;
wq->sq_size = MLX5_TXPP_CLKQ_SIZE;
/* There should not be WQE leftovers in the cyclic queue. */
MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
umem_size = MLX5_WQE_SIZE * wq->sq_size;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
page_size, sh->numa_node);
DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
/* Register allocated buffer in user space with DevX. */
wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
(void *)(uintptr_t)wq->sq_buf,
IBV_ACCESS_LOCAL_WRITE);
DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
/* Create send queue object for Clock Queue. */
sq_attr.tis_lst_sz = 1;
sq_attr.tis_num = sh->tis->id;
sq_attr.non_wire = 0;
sq_attr.static_sq_wq = 1;
sq_attr.non_wire = 1;
sq_attr.static_sq_wq = 1;
sq_attr.state = MLX5_SQC_STATE_RST;
sq_attr.cqn = wq->cq->id;
sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
sq_attr.wq_attr.cd_slave = 1;
sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
sq_attr.wq_attr.pd = sh->pdn;
sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
sq_attr.wq_attr.dbr_umem_valid = 1;
sq_attr.wq_attr.dbr_addr = umem_dbrec;
sq_attr.wq_attr.dbr_umem_id = wq->sq_umem->umem_id;
sq_attr.wq_attr.wq_umem_valid = 1;
sq_attr.wq_attr.wq_umem_id = wq->sq_umem->umem_id;
/* umem_offset must be zero for static_sq_wq queue. */
sq_attr.wq_attr.wq_umem_offset = 0;
wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
MLX5_SND_DBR * sizeof(uint32_t));
/* Build the WQEs in the Send Queue before moving it to the Ready state. */
mlx5_txpp_fill_wqe_clock_queue(sh);
/* Change queue state to ready. */
msq_attr.sq_state = MLX5_SQC_STATE_RST;
msq_attr.state = MLX5_SQC_STATE_RDY;
ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
DRV_LOG(ERR, "Failed to set SQ ready state for Clock Queue.");
mlx5_txpp_destroy_clock_queue(sh);
/*
 * The routine initializes the packet pacing infrastructure:
 * - allocates PP context
 * - attaches rearm interrupt handler
 * - starts Clock Queue
 * Returns 0 on success, negative otherwise
 */
mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
int tx_pp = priv->config.tx_pp;
/* Store the requested pacing parameters. */
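/* A negative tx_pp devarg value selects the test mode; the tick period is its absolute value. */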
sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
sh->txpp.test = !!(tx_pp < 0);
sh->txpp.skew = priv->config.tx_skew;
sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
ret = mlx5_txpp_create_eqn(sh);
ret = mlx5_txpp_alloc_pp_index(sh);
ret = mlx5_txpp_create_clock_queue(sh);
ret = mlx5_txpp_create_rearm_queue(sh);
mlx5_txpp_destroy_rearm_queue(sh);
mlx5_txpp_destroy_clock_queue(sh);
mlx5_txpp_free_pp_index(sh);
mlx5_txpp_destroy_eqn(sh);
/*
 * The routine destroys the packet pacing infrastructure:
 * - detaches rearm interrupt handler
 */
mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
mlx5_txpp_destroy_rearm_queue(sh);
mlx5_txpp_destroy_clock_queue(sh);
mlx5_txpp_free_pp_index(sh);
mlx5_txpp_destroy_eqn(sh);
/*
 * Creates and starts packet pacing infrastructure on specified device.
 * Pointer to Ethernet device structure.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
 */
mlx5_txpp_start(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
if (!priv->config.tx_pp) {
/* Packet pacing is not requested for the device. */
MLX5_ASSERT(priv->txpp_en == 0);
/* Packet pacing is already enabled for the device. */
MLX5_ASSERT(sh->txpp.refcnt);
if (priv->config.tx_pp > 0) {
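/* Look up the Tx timestamp dynamic mbuf flag; presumably scheduling on send is only enabled if the application has registered it. */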
ret = rte_mbuf_dynflag_lookup
(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
ret = pthread_mutex_lock(&sh->txpp.mutex);
if (sh->txpp.refcnt) {
err = mlx5_txpp_create(sh, priv);
MLX5_ASSERT(sh->txpp.tick);
ret = pthread_mutex_unlock(&sh->txpp.mutex);
/*
 * Stops and destroys packet pacing infrastructure on specified device.
 * Pointer to Ethernet device structure.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
 */
mlx5_txpp_stop(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
if (!priv->txpp_en) {
/* Packet pacing is already disabled for the device. */
ret = pthread_mutex_lock(&sh->txpp.mutex);
MLX5_ASSERT(sh->txpp.refcnt);
if (!sh->txpp.refcnt || --sh->txpp.refcnt)
/* No more references, do the actual destroy. */
mlx5_txpp_destroy(sh);
ret = pthread_mutex_unlock(&sh->txpp.mutex);