1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
8 #include <rte_ethdev_driver.h>
9 #include <rte_interrupts.h>
10 #include <rte_alarm.h>
11 #include <rte_malloc.h>
12 #include <rte_cycles.h>
15 #include "mlx5_rxtx.h"
16 #include "mlx5_common_os.h"
18 /* Destroy Event Queue Notification Channel. */
20 mlx5_txpp_destroy_eqn(struct mlx5_dev_ctx_shared *sh)
23 mlx5_glue->devx_destroy_event_channel(sh->txpp.echan);
24 sh->txpp.echan = NULL;
29 /* Create Event Queue Notification Channel. */
31 mlx5_txpp_create_eqn(struct mlx5_dev_ctx_shared *sh)
35 MLX5_ASSERT(!sh->txpp.echan);
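/* Select the EQ by the CPU id of the current (service) lcore. */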
36 lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
37 if (mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->txpp.eqn)) {
39 DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
43 sh->txpp.echan = mlx5_glue->devx_create_event_channel(sh->ctx,
44 MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
45 if (!sh->txpp.echan) {
48 DRV_LOG(ERR, "Failed to create event channel %d.",
56 mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
59 mlx5_glue->dv_free_pp(sh->txpp.pp);
65 /* Allocate Packet Pacing index from kernel via mlx5dv call. */
67 mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
69 #ifdef HAVE_MLX5DV_PP_ALLOC
70 uint32_t pp[MLX5_ST_SZ_DW(set_pp_rate_limit_context)];
73 MLX5_ASSERT(!sh->txpp.pp);
74 memset(&pp, 0, sizeof(pp));
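/* Convert the requested tick (nanoseconds between scheduled sends) into a rate in packets per second. */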
75 rate = NS_PER_S / sh->txpp.tick;
76 if (rate * sh->txpp.tick != NS_PER_S)
77 DRV_LOG(WARNING, "Packet pacing frequency is not precise.");
81 len = RTE_MAX(MLX5_TXPP_TEST_PKT_SIZE,
82 (size_t)RTE_ETHER_MIN_LEN);
83 MLX5_SET(set_pp_rate_limit_context, &pp,
84 burst_upper_bound, len);
85 MLX5_SET(set_pp_rate_limit_context, &pp,
86 typical_packet_size, len);
87 /* Convert packets per second into kilobits per second. */
88 rate = (rate * len) / (1000ul / CHAR_BIT);
89 DRV_LOG(INFO, "Packet pacing rate set to %" PRIu64, rate);
91 MLX5_SET(set_pp_rate_limit_context, &pp, rate_limit, rate);
92 MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
93 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
94 sh->txpp.pp = mlx5_glue->dv_alloc_pp
95 (sh->ctx, sizeof(pp), &pp,
96 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
97 if (sh->txpp.pp == NULL) {
98 DRV_LOG(ERR, "Failed to allocate packet pacing index.");
102 if (!sh->txpp.pp->index) {
103 DRV_LOG(ERR, "Zero packet pacing index allocated.");
104 mlx5_txpp_free_pp_index(sh);
108 sh->txpp.pp_id = sh->txpp.pp->index;
112 DRV_LOG(ERR, "Allocating pacing index is not supported.");
119 mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
122 claim_zero(mlx5_devx_cmd_destroy(wq->sq));
124 claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
126 rte_free((void *)(uintptr_t)wq->sq_buf);
128 claim_zero(mlx5_devx_cmd_destroy(wq->cq));
130 claim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));
132 rte_free((void *)(uintptr_t)wq->cq_buf);
133 memset(wq, 0, sizeof(*wq));
137 mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
139 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
141 mlx5_txpp_destroy_send_queue(wq);
145 mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
147 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
149 mlx5_txpp_destroy_send_queue(wq);
151 rte_free(sh->txpp.tsa);
157 mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
159 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
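/*
 * Compose the 64-bit doorbell value from the control segment of the
 * WQE being rung: keep the prebuilt opcode/DS words and fill the WQE
 * index field (bits 8..23 of the first dword) with the new index.
 */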
166 cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
167 (wq->wqes[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);
168 cs.w32[1] = wq->wqes[ci & (wq->sq_size - 1)].ctrl[1];
169 /* Update SQ doorbell record with new SQ ci. */
170 rte_compiler_barrier();
171 *wq->sq_dbrec = rte_cpu_to_be_32(wq->sq_ci);
172 /* Make sure the doorbell record is updated. */
174 /* Write to the doorbell register to start processing. */
175 __mlx5_uar_write64_relaxed(cs.w64, sh->tx_uar->reg_addr, NULL);
180 mlx5_txpp_fill_cqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
182 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
183 struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
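/* Preset every CQE opcode as invalid with the owner bit set so stale entries are never treated as valid completions. */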
186 for (i = 0; i < MLX5_TXPP_REARM_CQ_SIZE; i++) {
187 cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
193 mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
195 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
196 struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
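/*
 * Each iteration posts a WQE pair: a SEND_EN that allows the Clock
 * Queue (slave) to advance, followed by a WAIT that blocks until the
 * corresponding Clock Queue completion arrives.
 */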
199 for (i = 0; i < wq->sq_size; i += 2) {
200 struct mlx5_wqe_cseg *cs;
201 struct mlx5_wqe_qseg *qs;
204 /* Build SEND_EN request with slave WQE index. */
205 cs = &wqe[i + 0].cseg;
206 cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
207 cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
208 cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
209 MLX5_COMP_MODE_OFFSET);
210 cs->misc = RTE_BE32(0);
211 qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
212 index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
213 ((1 << MLX5_WQ_INDEX_WIDTH) - 1);
214 qs->max_index = rte_cpu_to_be_32(index);
215 qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.sq->id);
216 /* Build WAIT request with slave CQE index. */
217 cs = &wqe[i + 1].cseg;
218 cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
219 cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
220 cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
221 MLX5_COMP_MODE_OFFSET);
222 cs->misc = RTE_BE32(0);
223 qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
224 index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
225 ((1 << MLX5_CQ_INDEX_WIDTH) - 1);
226 qs->max_index = rte_cpu_to_be_32(index);
227 qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.cq->id);
231 /* Creates the Rearm Queue to fire requests to the Clock Queue in real time. */
233 mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
235 struct mlx5_devx_create_sq_attr sq_attr = { 0 };
236 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
237 struct mlx5_devx_cq_attr cq_attr = { 0 };
238 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
239 size_t page_size = sysconf(_SC_PAGESIZE);
240 uint32_t umem_size, umem_dbrec;
243 /* Allocate memory buffer for CQEs and doorbell record. */
244 umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
245 umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
246 umem_size += MLX5_DBR_SIZE;
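/* The doorbell record is placed in the same umem, right after the CQE ring. */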
247 wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
248 page_size, sh->numa_node);
250 DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
253 /* Register allocated buffer in user space with DevX. */
254 wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
255 (void *)(uintptr_t)wq->cq_buf,
257 IBV_ACCESS_LOCAL_WRITE);
260 DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
263 /* Create completion queue object for Rearm Queue. */
264 cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
265 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
266 cq_attr.uar_page_id = sh->tx_uar->page_id;
267 cq_attr.eqn = sh->txpp.eqn;
268 cq_attr.q_umem_valid = 1;
269 cq_attr.q_umem_offset = 0;
270 cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
271 cq_attr.db_umem_valid = 1;
272 cq_attr.db_umem_offset = umem_dbrec;
273 cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
274 cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_REARM_CQ_SIZE);
275 cq_attr.log_page_size = rte_log2_u32(page_size);
276 wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
279 DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
282 wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
285 /* Mark all CQEs initially as invalid. */
286 mlx5_txpp_fill_cqe_rearm_queue(sh);
288 * Allocate memory buffer for Send Queue WQEs.
289 * There should be no WQE leftovers in the cyclic queue.
291 wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
292 MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
293 umem_size = MLX5_WQE_SIZE * wq->sq_size;
294 umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
295 umem_size += MLX5_DBR_SIZE;
296 wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
297 page_size, sh->numa_node);
299 DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
303 /* Register allocated buffer in user space with DevX. */
304 wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
305 (void *)(uintptr_t)wq->sq_buf,
307 IBV_ACCESS_LOCAL_WRITE);
310 DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
313 /* Create send queue object for Rearm Queue. */
314 sq_attr.state = MLX5_SQC_STATE_RST;
315 sq_attr.tis_lst_sz = 1;
316 sq_attr.tis_num = sh->tis->id;
317 sq_attr.cqn = wq->cq->id;
318 sq_attr.cd_master = 1;
319 sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
320 sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
321 sq_attr.wq_attr.pd = sh->pdn;
322 sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
323 sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
324 sq_attr.wq_attr.dbr_umem_valid = 1;
325 sq_attr.wq_attr.dbr_addr = umem_dbrec;
326 sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
327 sq_attr.wq_attr.wq_umem_valid = 1;
328 sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
329 sq_attr.wq_attr.wq_umem_offset = 0;
330 wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
333 DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
336 wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
337 MLX5_SND_DBR * sizeof(uint32_t));
338 /* Build the WQEs in the Send Queue before moving it to the Ready state. */
339 mlx5_txpp_fill_wqe_rearm_queue(sh);
340 /* Change queue state to ready. */
341 msq_attr.sq_state = MLX5_SQC_STATE_RST;
342 msq_attr.state = MLX5_SQC_STATE_RDY;
343 ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
345 DRV_LOG(ERR, "Failed to set SQ ready state Rearm Queue.");
351 mlx5_txpp_destroy_rearm_queue(sh);
357 mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
359 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
360 struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
361 struct mlx5_wqe_cseg *cs = &wqe->cseg;
362 uint32_t wqe_size, opcode, i;
365 /* For test purposes, fill the WQ with SEND WQEs carrying an inline test packet. */
367 wqe_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
369 2 * MLX5_WQE_ESEG_SIZE -
370 MLX5_ESEG_MIN_INLINE_SIZE,
372 opcode = MLX5_OPCODE_SEND;
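/* In non-test mode the Clock Queue carries only NOP WQEs; completions are generated but no packet goes to the wire. */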
374 wqe_size = MLX5_WSEG_SIZE;
375 opcode = MLX5_OPCODE_NOP;
377 cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
378 cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) |
379 (wqe_size / MLX5_WSEG_SIZE));
380 cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
381 cs->misc = RTE_BE32(0);
382 wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);
384 struct mlx5_wqe_eseg *es = &wqe->eseg;
385 struct rte_ether_hdr *eth_hdr;
386 struct rte_ipv4_hdr *ip_hdr;
387 struct rte_udp_hdr *udp_hdr;
389 /* Build the inline test packet pattern. */
390 MLX5_ASSERT(wqe_size <= MLX5_WQE_SIZE_MAX);
391 MLX5_ASSERT(MLX5_TXPP_TEST_PKT_SIZE >=
392 (sizeof(struct rte_ether_hdr) +
393 sizeof(struct rte_ipv4_hdr)));
395 es->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
400 es->inline_hdr_sz = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE);
401 /* Build test packet L2 header (Ethernet). */
402 dst = (uint8_t *)&es->inline_data;
403 eth_hdr = (struct rte_ether_hdr *)dst;
404 rte_eth_random_addr(&eth_hdr->d_addr.addr_bytes[0]);
405 rte_eth_random_addr(&eth_hdr->s_addr.addr_bytes[0]);
406 eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
407 /* Build test packet L3 header (IP v4). */
408 dst += sizeof(struct rte_ether_hdr);
409 ip_hdr = (struct rte_ipv4_hdr *)dst;
410 ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
411 ip_hdr->type_of_service = 0;
412 ip_hdr->fragment_offset = 0;
413 ip_hdr->time_to_live = 64;
414 ip_hdr->next_proto_id = IPPROTO_UDP;
415 ip_hdr->packet_id = 0;
416 ip_hdr->total_length = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
417 sizeof(struct rte_ether_hdr));
418 /* Use RFC 5735 / RFC 2544 reserved network test addresses. */
419 ip_hdr->src_addr = RTE_BE32((198U << 24) | (18 << 16) |
421 ip_hdr->dst_addr = RTE_BE32((198U << 24) | (18 << 16) |
423 if (MLX5_TXPP_TEST_PKT_SIZE <
424 (sizeof(struct rte_ether_hdr) +
425 sizeof(struct rte_ipv4_hdr) +
426 sizeof(struct rte_udp_hdr)))
428 /* Build test packet L4 header (UDP). */
429 dst += sizeof(struct rte_ipv4_hdr);
430 udp_hdr = (struct rte_udp_hdr *)dst;
431 udp_hdr->src_port = RTE_BE16(9); /* RFC863 Discard. */
432 udp_hdr->dst_port = RTE_BE16(9);
433 udp_hdr->dgram_len = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
434 sizeof(struct rte_ether_hdr) -
435 sizeof(struct rte_ipv4_hdr));
436 udp_hdr->dgram_cksum = 0;
437 /* Fill the test packet data. */
438 dst += sizeof(struct rte_udp_hdr);
439 for (i = sizeof(struct rte_ether_hdr) +
440 sizeof(struct rte_ipv4_hdr) +
441 sizeof(struct rte_udp_hdr);
442 i < MLX5_TXPP_TEST_PKT_SIZE; i++)
443 *dst++ = (uint8_t)(i & 0xFF);
446 /* Duplicate the pattern to the next WQEs. */
447 dst = (uint8_t *)(uintptr_t)wq->sq_buf;
448 for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
450 rte_memcpy(dst, (void *)(uintptr_t)wq->sq_buf, wqe_size);
454 /* Creates the Clock Queue for packet pacing, returns zero on success. */
456 mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
458 struct mlx5_devx_create_sq_attr sq_attr = { 0 };
459 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
460 struct mlx5_devx_cq_attr cq_attr = { 0 };
461 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
462 size_t page_size = sysconf(_SC_PAGESIZE);
463 uint32_t umem_size, umem_dbrec;
466 sh->txpp.tsa = rte_zmalloc_socket(__func__,
467 MLX5_TXPP_REARM_SQ_SIZE *
468 sizeof(struct mlx5_txpp_ts),
471 DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
476 /* Allocate memory buffer for CQEs and doorbell record. */
477 umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;
478 umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
479 umem_size += MLX5_DBR_SIZE;
480 wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
481 page_size, sh->numa_node);
483 DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
486 /* Register allocated buffer in user space with DevX. */
487 wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
488 (void *)(uintptr_t)wq->cq_buf,
490 IBV_ACCESS_LOCAL_WRITE);
493 DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
496 /* Create completion queue object for Clock Queue. */
497 cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
498 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
499 cq_attr.use_first_only = 1;
500 cq_attr.overrun_ignore = 1;
501 cq_attr.uar_page_id = sh->tx_uar->page_id;
502 cq_attr.eqn = sh->txpp.eqn;
503 cq_attr.q_umem_valid = 1;
504 cq_attr.q_umem_offset = 0;
505 cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
506 cq_attr.db_umem_valid = 1;
507 cq_attr.db_umem_offset = umem_dbrec;
508 cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
509 cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);
510 cq_attr.log_page_size = rte_log2_u32(page_size);
511 wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
514 DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
517 wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
519 /* Allocate memory buffer for Send Queue WQEs. */
521 wq->sq_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
523 2 * MLX5_WQE_ESEG_SIZE -
524 MLX5_ESEG_MIN_INLINE_SIZE,
525 MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
526 wq->sq_size *= MLX5_TXPP_CLKQ_SIZE;
528 wq->sq_size = MLX5_TXPP_CLKQ_SIZE;
530 /* There should not be WQE leftovers in the cyclic queue. */
531 MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
532 umem_size = MLX5_WQE_SIZE * wq->sq_size;
533 umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
534 umem_size += MLX5_DBR_SIZE;
535 wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
536 page_size, sh->numa_node);
538 DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
542 /* Register allocated buffer in user space with DevX. */
543 wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
544 (void *)(uintptr_t)wq->sq_buf,
546 IBV_ACCESS_LOCAL_WRITE);
549 DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
552 /* Create send queue object for Clock Queue. */
554 sq_attr.tis_lst_sz = 1;
555 sq_attr.tis_num = sh->tis->id;
556 sq_attr.non_wire = 0;
557 sq_attr.static_sq_wq = 1;
559 sq_attr.non_wire = 1;
560 sq_attr.static_sq_wq = 1;
562 sq_attr.state = MLX5_SQC_STATE_RST;
563 sq_attr.cqn = wq->cq->id;
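/* Bind the SQ to the packet pacing index allocated earlier; the hardware then executes the queue WQEs at the configured rate. */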
564 sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
565 sq_attr.wq_attr.cd_slave = 1;
566 sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
567 sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
568 sq_attr.wq_attr.pd = sh->pdn;
569 sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
570 sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
571 sq_attr.wq_attr.dbr_umem_valid = 1;
572 sq_attr.wq_attr.dbr_addr = umem_dbrec;
573 sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
574 sq_attr.wq_attr.wq_umem_valid = 1;
575 sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
576 /* umem_offset must be zero for static_sq_wq queue. */
577 sq_attr.wq_attr.wq_umem_offset = 0;
578 wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
581 DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
584 wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
585 MLX5_SND_DBR * sizeof(uint32_t));
586 /* Build the WQEs in the Send Queue before moving it to the Ready state. */
587 mlx5_txpp_fill_wqe_clock_queue(sh);
588 /* Change queue state to ready. */
589 msq_attr.sq_state = MLX5_SQC_STATE_RST;
590 msq_attr.state = MLX5_SQC_STATE_RDY;
592 ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
594 DRV_LOG(ERR, "Failed to set SQ ready state Clock Queue.");
600 mlx5_txpp_destroy_clock_queue(sh);
605 /* Enable notification from the Rearm Queue CQ. */
607 mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
609 struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
610 uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
611 uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
612 uint64_t db_be = rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq->id);
613 uint32_t *addr = RTE_PTR_ADD(sh->tx_uar->base_addr, MLX5_CQ_DOORBELL);
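/* Write the arm command to the CQ doorbell record first, then ring the UAR CQ doorbell with the same value. */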
615 rte_compiler_barrier();
616 aq->cq_dbrec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
619 *(uint64_t *)addr = db_be;
621 *(uint32_t *)addr = db_be;
623 *((uint32_t *)addr + 1) = db_be >> 32;
629 mlx5_atomic_read_cqe(rte_int128_t *from, rte_int128_t *ts)
632 * The only CQE of the Clock Queue is continuously
633 * updated by the hardware at the specified rate. We have to
634 * read the timestamp and WQE completion index atomically.
636 #if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_ARM64)
639 memset(&src, 0, sizeof(src));
641 /* if (*from == *ts) *from = *src else *ts = *from; */
642 rte_atomic128_cmp_exchange(from, ts, &src, 0,
643 __ATOMIC_RELAXED, __ATOMIC_RELAXED);
645 rte_atomic64_t *cqe = (rte_atomic64_t *)from;
647 /* Power architecture does not support 16B compare-and-swap. */
652 rte_compiler_barrier();
653 tm = rte_atomic64_read(cqe + 0);
654 op = rte_atomic64_read(cqe + 1);
655 rte_compiler_barrier();
656 if (tm != rte_atomic64_read(cqe + 0))
658 if (op != rte_atomic64_read(cqe + 1))
668 /* Stores timestamp in the cache structure to share data with datapath. */
670 mlx5_txpp_cache_timestamp(struct mlx5_dev_ctx_shared *sh,
671 uint64_t ts, uint64_t ci)
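/*
 * Pack the completion index into the upper MLX5_CQ_INDEX_WIDTH bits and
 * the truncated timestamp into the lower bits, so one 64-bit word
 * carries both values for the datapath.
 */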
673 ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
674 ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
675 rte_compiler_barrier();
676 rte_atomic64_set(&sh->txpp.ts.ts, ts);
677 rte_atomic64_set(&sh->txpp.ts.ci_ts, ci);
681 /* Reads timestamp from Clock Queue CQE and stores in the cache. */
683 mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)
685 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
686 struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
689 struct mlx5_cqe_ts cts;
694 static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
695 "Wrong timestamp CQE part size");
696 mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
697 if (to.cts.op_own >> 4) {
698 DRV_LOG(DEBUG, "Clock Queue error sync lost.");
699 rte_atomic32_inc(&sh->txpp.err_clock_queue);
700 sh->txpp.sync_lost = 1;
703 ci = rte_be_to_cpu_16(to.cts.wqe_counter);
704 ts = rte_be_to_cpu_64(to.cts.timestamp);
705 ts = mlx5_txpp_convert_rx_ts(sh, ts);
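/* Advance the CQ counter by the WQE counter delta since the last poll (16-bit wraparound safe). */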
706 wq->cq_ci += (ci - wq->sq_ci) & UINT16_MAX;
708 mlx5_txpp_cache_timestamp(sh, ts, wq->cq_ci);
711 /* Waits for the first completion on Clock Queue to init timestamp. */
713 mlx5_txpp_init_timestamp(struct mlx5_dev_ctx_shared *sh)
715 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
720 for (wait = 0; wait < MLX5_TXPP_WAIT_INIT_TS; wait++) {
721 struct timespec onems;
723 mlx5_txpp_update_timestamp(sh);
726 /* Wait one millisecond and try again. */
728 onems.tv_nsec = NS_PER_S / MS_PER_S;
729 nanosleep(&onems, 0);
731 DRV_LOG(ERR, "Unable to initialize timestamp.");
732 sh->txpp.sync_lost = 1;
735 #ifdef HAVE_IBV_DEVX_EVENT
736 /* Gathers timestamp statistics from the Clock Queue CQE. */
738 mlx5_txpp_gather_timestamp(struct mlx5_dev_ctx_shared *sh)
740 /* Check whether we have a valid timestamp. */
741 if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
743 MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
744 sh->txpp.tsa[sh->txpp.ts_p] = sh->txpp.ts;
745 if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
747 if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
751 /* Handles Rearm Queue completions in periodic service. */
752 static __rte_always_inline void
753 mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
755 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
756 uint32_t cq_ci = wq->cq_ci;
761 volatile struct mlx5_cqe *cqe;
763 cqe = &wq->cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];
764 ret = check_cqe(cqe, MLX5_TXPP_REARM_CQ_SIZE, cq_ci);
766 case MLX5_CQE_STATUS_ERR:
770 case MLX5_CQE_STATUS_SW_OWN:
774 case MLX5_CQE_STATUS_HW_OWN:
780 } while (ret != MLX5_CQE_STATUS_HW_OWN);
781 if (likely(cq_ci != wq->cq_ci)) {
782 /* Check whether we have missed interrupts. */
783 if (cq_ci - wq->cq_ci != 1) {
784 DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
785 rte_atomic32_inc(&sh->txpp.err_miss_int);
786 /* Check sync lost on wqe index. */
787 if (cq_ci - wq->cq_ci >=
788 (((1UL << MLX5_WQ_INDEX_WIDTH) /
789 MLX5_TXPP_REARM) - 1))
792 /* Update doorbell record to notify hardware. */
793 rte_compiler_barrier();
794 *wq->cq_dbrec = rte_cpu_to_be_32(cq_ci);
797 /* Fire new requests to Rearm Queue. */
799 DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
800 rte_atomic32_inc(&sh->txpp.err_rearm_queue);
801 sh->txpp.sync_lost = 1;
806 /* Handles Clock Queue completions in periodic service. */
807 static __rte_always_inline void
808 mlx5_txpp_handle_clock_queue(struct mlx5_dev_ctx_shared *sh)
810 mlx5_txpp_update_timestamp(sh);
811 mlx5_txpp_gather_timestamp(sh);
815 /* Invoked periodically on Rearm Queue completions. */
817 mlx5_txpp_interrupt_handler(void *cb_arg)
819 #ifndef HAVE_IBV_DEVX_EVENT
820 RTE_SET_USED(cb_arg);
823 struct mlx5_dev_ctx_shared *sh = cb_arg;
825 struct mlx5dv_devx_async_event_hdr event_resp;
826 uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
829 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
830 /* Process events in the loop. Only rearm completions are expected. */
831 while (mlx5_glue->devx_get_event
835 (ssize_t)sizeof(out.event_resp.cookie)) {
836 mlx5_txpp_handle_rearm_queue(sh);
837 mlx5_txpp_handle_clock_queue(sh);
838 mlx5_txpp_cq_arm(sh);
839 mlx5_txpp_doorbell_rearm_queue
840 (sh, sh->txpp.rearm_queue.sq_ci - 1);
842 #endif /* HAVE_IBV_DEVX_EVENT */
846 mlx5_txpp_stop_service(struct mlx5_dev_ctx_shared *sh)
848 if (!sh->txpp.intr_handle.fd)
850 mlx5_intr_callback_unregister(&sh->txpp.intr_handle,
851 mlx5_txpp_interrupt_handler, sh);
852 sh->txpp.intr_handle.fd = 0;
855 /* Attaches the interrupt handler and fires the first request to the Rearm Queue. */
857 mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
859 uint16_t event_nums[1] = {0};
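/* Event number 0 - presumably the CQ completion event - is subscribed below. */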
863 /* Attach interrupt handler to process Rearm Queue completions. */
864 flags = fcntl(sh->txpp.echan->fd, F_GETFL);
865 ret = fcntl(sh->txpp.echan->fd, F_SETFL, flags | O_NONBLOCK);
867 DRV_LOG(ERR, "Failed to change event channel FD.");
871 memset(&sh->txpp.intr_handle, 0, sizeof(sh->txpp.intr_handle));
872 sh->txpp.intr_handle.fd = sh->txpp.echan->fd;
873 sh->txpp.intr_handle.type = RTE_INTR_HANDLE_EXT;
874 if (rte_intr_callback_register(&sh->txpp.intr_handle,
875 mlx5_txpp_interrupt_handler, sh)) {
876 sh->txpp.intr_handle.fd = 0;
877 DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
880 /* Subscribe CQ event to the event channel controlled by the driver. */
881 ret = mlx5_glue->devx_subscribe_devx_event(sh->txpp.echan,
882 sh->txpp.rearm_queue.cq->obj,
886 DRV_LOG(ERR, "Failed to subscribe CQE event.");
890 /* Enable interrupts in the CQ. */
891 mlx5_txpp_cq_arm(sh);
892 /* Fire the first request on Rearm Queue. */
893 mlx5_txpp_doorbell_rearm_queue(sh, sh->txpp.rearm_queue.sq_size - 1);
894 mlx5_txpp_init_timestamp(sh);
899 * The routine initializes the packet pacing infrastructure:
900 * - allocates PP context
903 * - attaches rearm interrupt handler
904 * - starts Clock Queue
906 * Returns 0 on success, negative otherwise
909 mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
911 int tx_pp = priv->config.tx_pp;
914 /* Store the requested pacing parameters. */
915 sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
916 sh->txpp.test = !!(tx_pp < 0);
917 sh->txpp.skew = priv->config.tx_skew;
918 sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
919 ret = mlx5_txpp_create_eqn(sh);
922 ret = mlx5_txpp_alloc_pp_index(sh);
925 ret = mlx5_txpp_create_clock_queue(sh);
928 ret = mlx5_txpp_create_rearm_queue(sh);
931 ret = mlx5_txpp_start_service(sh);
936 mlx5_txpp_stop_service(sh);
937 mlx5_txpp_destroy_rearm_queue(sh);
938 mlx5_txpp_destroy_clock_queue(sh);
939 mlx5_txpp_free_pp_index(sh);
940 mlx5_txpp_destroy_eqn(sh);
949 * The routine destroys the packet pacing infrastructure:
950 * - detaches rearm interrupt handler
956 mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
958 mlx5_txpp_stop_service(sh);
959 mlx5_txpp_destroy_rearm_queue(sh);
960 mlx5_txpp_destroy_clock_queue(sh);
961 mlx5_txpp_free_pp_index(sh);
962 mlx5_txpp_destroy_eqn(sh);
969 * Creates and starts packet pacing infrastructure on specified device.
972 * Pointer to Ethernet device structure.
975 * 0 on success, a negative errno value otherwise and rte_errno is set.
978 mlx5_txpp_start(struct rte_eth_dev *dev)
980 struct mlx5_priv *priv = dev->data->dev_private;
981 struct mlx5_dev_ctx_shared *sh = priv->sh;
985 if (!priv->config.tx_pp) {
986 /* Packet pacing is not requested for the device. */
987 MLX5_ASSERT(priv->txpp_en == 0);
991 /* Packet pacing is already enabled for the device. */
992 MLX5_ASSERT(sh->txpp.refcnt);
995 if (priv->config.tx_pp > 0) {
996 ret = rte_mbuf_dynflag_lookup
997 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
1001 ret = pthread_mutex_lock(&sh->txpp.mutex);
1004 if (sh->txpp.refcnt) {
1008 err = mlx5_txpp_create(sh, priv);
1010 MLX5_ASSERT(sh->txpp.tick);
1012 sh->txpp.refcnt = 1;
1017 ret = pthread_mutex_unlock(&sh->txpp.mutex);
1024 * Stops and destroys packet pacing infrastructure on specified device.
1027 * Pointer to Ethernet device structure.
1030 * 0 on success, a negative errno value otherwise and rte_errno is set.
1033 mlx5_txpp_stop(struct rte_eth_dev *dev)
1035 struct mlx5_priv *priv = dev->data->dev_private;
1036 struct mlx5_dev_ctx_shared *sh = priv->sh;
1039 if (!priv->txpp_en) {
1040 /* Packet pacing is already disabled for the device. */
1044 ret = pthread_mutex_lock(&sh->txpp.mutex);
1047 MLX5_ASSERT(sh->txpp.refcnt);
1048 if (!sh->txpp.refcnt || --sh->txpp.refcnt)
1050 /* No more references, perform the actual destroy. */
1051 mlx5_txpp_destroy(sh);
1052 ret = pthread_mutex_unlock(&sh->txpp.mutex);