1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
8 #include <rte_ethdev_driver.h>
9 #include <rte_interrupts.h>
10 #include <rte_alarm.h>
11 #include <rte_malloc.h>
12 #include <rte_cycles.h>
13 #include <rte_eal_paging.h>
15 #include <mlx5_malloc.h>
18 #include "mlx5_rxtx.h"
19 #include "mlx5_common_os.h"
21 static const char * const mlx5_txpp_stat_names[] = {
22 "tx_pp_missed_interrupt_errors", /* Missed service interrupt. */
23 "tx_pp_rearm_queue_errors", /* Rearm Queue errors. */
24 "tx_pp_clock_queue_errors", /* Clock Queue errors. */
25 "tx_pp_timestamp_past_errors", /* Timestamp in the past. */
26 "tx_pp_timestamp_future_errors", /* Timestamp in the distant future. */
27 "tx_pp_jitter", /* Timestamp jitter (one Clock Queue completion). */
28 "tx_pp_wander", /* Timestamp wander (half of Clock Queue CQEs). */
29 "tx_pp_sync_lost", /* Scheduling synchronization lost. */
32 /* Destroy Event Queue Notification Channel. */
34 mlx5_txpp_destroy_event_channel(struct mlx5_dev_ctx_shared *sh)
37 mlx5_glue->devx_destroy_event_channel(sh->txpp.echan);
38 sh->txpp.echan = NULL;
42 /* Create Event Queue Notification Channel. */
44 mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
46 MLX5_ASSERT(!sh->txpp.echan);
47 sh->txpp.echan = mlx5_glue->devx_create_event_channel(sh->ctx,
48 MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
49 if (!sh->txpp.echan) {
51 DRV_LOG(ERR, "Failed to create event channel %d.", rte_errno);
58 mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
60 #ifdef HAVE_MLX5DV_PP_ALLOC
62 mlx5_glue->dv_free_pp(sh->txpp.pp);
68 DRV_LOG(ERR, "Freeing pacing index is not supported.");
72 /* Allocate Packet Pacing index from kernel via mlx5dv call. */
74 mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
76 #ifdef HAVE_MLX5DV_PP_ALLOC
77 uint32_t pp[MLX5_ST_SZ_DW(set_pp_rate_limit_context)];
80 MLX5_ASSERT(!sh->txpp.pp);
81 memset(&pp, 0, sizeof(pp));
82 rate = NS_PER_S / sh->txpp.tick;
83 if (rate * sh->txpp.tick != NS_PER_S)
84 DRV_LOG(WARNING, "Packet pacing frequency is not precise.");
88 len = RTE_MAX(MLX5_TXPP_TEST_PKT_SIZE,
89 (size_t)RTE_ETHER_MIN_LEN);
90 MLX5_SET(set_pp_rate_limit_context, &pp,
91 burst_upper_bound, len);
92 MLX5_SET(set_pp_rate_limit_context, &pp,
93 typical_packet_size, len);
94 /* Convert packets per second into kilobits per second. */
95 rate = (rate * len) / (1000ul / CHAR_BIT);
96 DRV_LOG(INFO, "Packet pacing rate set to %" PRIu64, rate);
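/*
 * Note: rate was computed above as NS_PER_S / tick, i.e. scheduling
 * events per second, and len is the typical packet length in bytes;
 * dividing their product by (1000 / CHAR_BIT) = 125 turns bytes per
 * second into kilobits per second.
 */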
98 MLX5_SET(set_pp_rate_limit_context, &pp, rate_limit, rate);
99 MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
100 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
101 sh->txpp.pp = mlx5_glue->dv_alloc_pp
102 (sh->ctx, sizeof(pp), &pp,
103 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
104 if (sh->txpp.pp == NULL) {
105 DRV_LOG(ERR, "Failed to allocate packet pacing index.");
109 if (!((struct mlx5dv_pp *)sh->txpp.pp)->index) {
110 DRV_LOG(ERR, "Zero packet pacing index allocated.");
111 mlx5_txpp_free_pp_index(sh);
115 sh->txpp.pp_id = ((struct mlx5dv_pp *)(sh->txpp.pp))->index;
119 DRV_LOG(ERR, "Allocating pacing index is not supported.");
126 mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
129 claim_zero(mlx5_devx_cmd_destroy(wq->sq));
131 claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
133 mlx5_free((void *)(uintptr_t)wq->sq_buf);
135 claim_zero(mlx5_devx_cmd_destroy(wq->cq));
137 claim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));
139 mlx5_free((void *)(uintptr_t)wq->cq_buf);
140 memset(wq, 0, sizeof(*wq));
144 mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
146 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
148 mlx5_txpp_destroy_send_queue(wq);
152 mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
154 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
156 mlx5_txpp_destroy_send_queue(wq);
158 mlx5_free(sh->txpp.tsa);
164 mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
166 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
174 cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
175 (wq->wqes[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);
176 cs.w32[1] = wq->wqes[ci & (wq->sq_size - 1)].ctrl[1];
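/*
 * cs now holds a local copy of the first 8 bytes of the WQE control
 * segment: word 0 is the original opcode dword with the WQE index
 * field updated to (ci - 1), word 1 carries the SQ number and DS
 * count. This 8-byte value is what gets written to the UAR register
 * below as the doorbell payload.
 */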
177 /* Update SQ doorbell record with new SQ ci. */
178 rte_compiler_barrier();
179 *wq->sq_dbrec = rte_cpu_to_be_32(wq->sq_ci);
180 /* Make sure the doorbell record is updated. */
182 /* Write to doorbell register to start processing. */
183 reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
184 __mlx5_uar_write64_relaxed(cs.w64, reg_addr, NULL);
189 mlx5_txpp_fill_cqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
191 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
192 struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
195 for (i = 0; i < MLX5_TXPP_REARM_CQ_SIZE; i++) {
196 cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
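/*
 * Opcode MLX5_CQE_INVALID with the owner bit set makes check_cqe()
 * treat every entry as still owned by hardware, so no stale
 * completions are processed before the first real one arrives.
 */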
202 mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
204 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
205 struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
208 for (i = 0; i < wq->sq_size; i += 2) {
209 struct mlx5_wqe_cseg *cs;
210 struct mlx5_wqe_qseg *qs;
213 /* Build SEND_EN request with slave WQE index. */
214 cs = &wqe[i + 0].cseg;
215 cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
216 cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
217 cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
218 MLX5_COMP_MODE_OFFSET);
219 cs->misc = RTE_BE32(0);
220 qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
221 index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
222 ((1 << MLX5_WQ_INDEX_WIDTH) - 1);
223 qs->max_index = rte_cpu_to_be_32(index);
224 qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.sq->id);
225 /* Build WAIT request with slave CQE index. */
226 cs = &wqe[i + 1].cseg;
227 cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
228 cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
229 cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
230 MLX5_COMP_MODE_OFFSET);
231 cs->misc = RTE_BE32(0);
232 qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
233 index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
234 ((1 << MLX5_CQ_INDEX_WIDTH) - 1);
235 qs->max_index = rte_cpu_to_be_32(index);
236 qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.cq->id);
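/*
 * Each pair of WQEs drives one rearm cycle: the SEND_EN releases the
 * Clock Queue WQEs up to the given index, and the WAIT stalls this
 * queue until the Clock Queue CQ reaches the matching completion
 * index, keeping the two queues in lockstep.
 */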
240 /* Creates the Rearm Queue to fire requests to the Clock Queue in real time. */
242 mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
244 struct mlx5_devx_create_sq_attr sq_attr = { 0 };
245 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
246 struct mlx5_devx_cq_attr cq_attr = { 0 };
247 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
249 uint32_t umem_size, umem_dbrec;
252 page_size = rte_mem_page_size();
253 if (page_size == (size_t)-1) {
254 DRV_LOG(ERR, "Failed to get mem page size");
257 /* Allocate memory buffer for CQEs and doorbell record. */
258 umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
259 umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
260 umem_size += MLX5_DBR_SIZE;
261 wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
262 page_size, sh->numa_node);
264 DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
267 /* Register allocated buffer in user space with DevX. */
268 wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
269 (void *)(uintptr_t)wq->cq_buf,
271 IBV_ACCESS_LOCAL_WRITE);
274 DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
277 /* Create completion queue object for Rearm Queue. */
278 cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
279 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
280 cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
281 cq_attr.eqn = sh->eqn;
282 cq_attr.q_umem_valid = 1;
283 cq_attr.q_umem_offset = 0;
284 cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
285 cq_attr.db_umem_valid = 1;
286 cq_attr.db_umem_offset = umem_dbrec;
287 cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
288 cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_REARM_CQ_SIZE);
289 cq_attr.log_page_size = rte_log2_u32(page_size);
290 wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
293 DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
296 wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
299 /* Mark all CQEs initially as invalid. */
300 mlx5_txpp_fill_cqe_rearm_queue(sh);
302 * Allocate memory buffer for Send Queue WQEs.
303 * There should be no WQE leftovers in the cyclic queue.
305 wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
306 MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
307 umem_size = MLX5_WQE_SIZE * wq->sq_size;
308 umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
309 umem_size += MLX5_DBR_SIZE;
310 wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
311 page_size, sh->numa_node);
313 DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
317 /* Register allocated buffer in user space with DevX. */
318 wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
319 (void *)(uintptr_t)wq->sq_buf,
321 IBV_ACCESS_LOCAL_WRITE);
324 DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
327 /* Create send queue object for Rearm Queue. */
328 sq_attr.state = MLX5_SQC_STATE_RST;
329 sq_attr.tis_lst_sz = 1;
330 sq_attr.tis_num = sh->tis->id;
331 sq_attr.cqn = wq->cq->id;
332 sq_attr.cd_master = 1;
333 sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
334 sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
335 sq_attr.wq_attr.pd = sh->pdn;
336 sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
337 sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
338 sq_attr.wq_attr.dbr_umem_valid = 1;
339 sq_attr.wq_attr.dbr_addr = umem_dbrec;
340 sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
341 sq_attr.wq_attr.wq_umem_valid = 1;
342 sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
343 sq_attr.wq_attr.wq_umem_offset = 0;
344 wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
347 DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
350 wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
351 MLX5_SND_DBR * sizeof(uint32_t));
352 /* Build the WQEs in the Send Queue before moving it to the Ready state. */
353 mlx5_txpp_fill_wqe_rearm_queue(sh);
354 /* Change queue state to ready. */
355 msq_attr.sq_state = MLX5_SQC_STATE_RST;
356 msq_attr.state = MLX5_SQC_STATE_RDY;
357 ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
359 DRV_LOG(ERR, "Failed to set Rearm Queue SQ to ready state.");
365 mlx5_txpp_destroy_rearm_queue(sh);
371 mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
373 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
374 struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
375 struct mlx5_wqe_cseg *cs = &wqe->cseg;
376 uint32_t wqe_size, opcode, i;
379 /* For test purposes fill the WQ with SEND inline packet. */
381 wqe_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
383 2 * MLX5_WQE_ESEG_SIZE -
384 MLX5_ESEG_MIN_INLINE_SIZE,
386 opcode = MLX5_OPCODE_SEND;
388 wqe_size = MLX5_WSEG_SIZE;
389 opcode = MLX5_OPCODE_NOP;
391 cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
392 cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) |
393 (wqe_size / MLX5_WSEG_SIZE));
394 cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
395 cs->misc = RTE_BE32(0);
396 wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);
398 struct mlx5_wqe_eseg *es = &wqe->eseg;
399 struct rte_ether_hdr *eth_hdr;
400 struct rte_ipv4_hdr *ip_hdr;
401 struct rte_udp_hdr *udp_hdr;
403 /* Build the inline test packet pattern. */
404 MLX5_ASSERT(wqe_size <= MLX5_WQE_SIZE_MAX);
405 MLX5_ASSERT(MLX5_TXPP_TEST_PKT_SIZE >=
406 (sizeof(struct rte_ether_hdr) +
407 sizeof(struct rte_ipv4_hdr)));
409 es->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
414 es->inline_hdr_sz = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE);
415 /* Build test packet L2 header (Ethernet). */
416 dst = (uint8_t *)&es->inline_data;
417 eth_hdr = (struct rte_ether_hdr *)dst;
418 rte_eth_random_addr(&eth_hdr->d_addr.addr_bytes[0]);
419 rte_eth_random_addr(&eth_hdr->s_addr.addr_bytes[0]);
420 eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
421 /* Build test packet L3 header (IP v4). */
422 dst += sizeof(struct rte_ether_hdr);
423 ip_hdr = (struct rte_ipv4_hdr *)dst;
424 ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
425 ip_hdr->type_of_service = 0;
426 ip_hdr->fragment_offset = 0;
427 ip_hdr->time_to_live = 64;
428 ip_hdr->next_proto_id = IPPROTO_UDP;
429 ip_hdr->packet_id = 0;
430 ip_hdr->total_length = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
431 sizeof(struct rte_ether_hdr));
432 /* use RFC5735 / RFC2544 reserved network test addresses */
433 ip_hdr->src_addr = RTE_BE32((198U << 24) | (18 << 16) |
435 ip_hdr->dst_addr = RTE_BE32((198U << 24) | (18 << 16) |
437 if (MLX5_TXPP_TEST_PKT_SIZE <
438 (sizeof(struct rte_ether_hdr) +
439 sizeof(struct rte_ipv4_hdr) +
440 sizeof(struct rte_udp_hdr)))
442 /* Build test packet L4 header (UDP). */
443 dst += sizeof(struct rte_ipv4_hdr);
444 udp_hdr = (struct rte_udp_hdr *)dst;
445 udp_hdr->src_port = RTE_BE16(9); /* RFC863 Discard. */
446 udp_hdr->dst_port = RTE_BE16(9);
447 udp_hdr->dgram_len = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
448 sizeof(struct rte_ether_hdr) -
449 sizeof(struct rte_ipv4_hdr));
450 udp_hdr->dgram_cksum = 0;
451 /* Fill the test packet data. */
452 dst += sizeof(struct rte_udp_hdr);
453 for (i = sizeof(struct rte_ether_hdr) +
454 sizeof(struct rte_ipv4_hdr) +
455 sizeof(struct rte_udp_hdr);
456 i < MLX5_TXPP_TEST_PKT_SIZE; i++)
457 *dst++ = (uint8_t)(i & 0xFF);
460 /* Duplicate the pattern to the next WQEs. */
461 dst = (uint8_t *)(uintptr_t)wq->sq_buf;
462 for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
464 rte_memcpy(dst, (void *)(uintptr_t)wq->sq_buf, wqe_size);
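/*
 * All Clock Queue WQEs are identical copies of the first one, so the
 * cyclic queue produces the same (NOP or test SEND) work item on
 * every tick without further software posting.
 */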
468 /* Creates the Clock Queue for packet pacing, returns zero on success. */
470 mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
472 struct mlx5_devx_create_sq_attr sq_attr = { 0 };
473 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
474 struct mlx5_devx_cq_attr cq_attr = { 0 };
475 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
477 uint32_t umem_size, umem_dbrec;
480 page_size = rte_mem_page_size();
481 if (page_size == (size_t)-1) {
482 DRV_LOG(ERR, "Failed to get mem page size");
485 sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
486 MLX5_TXPP_REARM_SQ_SIZE *
487 sizeof(struct mlx5_txpp_ts),
490 DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
495 /* Allocate memory buffer for CQEs and doorbell record. */
496 umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;
497 umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
498 umem_size += MLX5_DBR_SIZE;
499 wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
500 page_size, sh->numa_node);
502 DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
505 /* Register allocated buffer in user space with DevX. */
506 wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
507 (void *)(uintptr_t)wq->cq_buf,
509 IBV_ACCESS_LOCAL_WRITE);
512 DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
515 /* Create completion queue object for Clock Queue. */
516 cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
517 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
518 cq_attr.use_first_only = 1;
519 cq_attr.overrun_ignore = 1;
520 cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
521 cq_attr.eqn = sh->eqn;
522 cq_attr.q_umem_valid = 1;
523 cq_attr.q_umem_offset = 0;
524 cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
525 cq_attr.db_umem_valid = 1;
526 cq_attr.db_umem_offset = umem_dbrec;
527 cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
528 cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);
529 cq_attr.log_page_size = rte_log2_u32(page_size);
530 wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
533 DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
536 wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
538 /* Allocate memory buffer for Send Queue WQEs. */
540 wq->sq_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
542 2 * MLX5_WQE_ESEG_SIZE -
543 MLX5_ESEG_MIN_INLINE_SIZE,
544 MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
545 wq->sq_size *= MLX5_TXPP_CLKQ_SIZE;
547 wq->sq_size = MLX5_TXPP_CLKQ_SIZE;
549 /* There should not be WQE leftovers in the cyclic queue. */
550 MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
551 umem_size = MLX5_WQE_SIZE * wq->sq_size;
552 umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
553 umem_size += MLX5_DBR_SIZE;
554 wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
555 page_size, sh->numa_node);
557 DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
561 /* Register allocated buffer in user space with DevX. */
562 wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
563 (void *)(uintptr_t)wq->sq_buf,
565 IBV_ACCESS_LOCAL_WRITE);
568 DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
571 /* Create send queue object for Clock Queue. */
573 sq_attr.tis_lst_sz = 1;
574 sq_attr.tis_num = sh->tis->id;
575 sq_attr.non_wire = 0;
576 sq_attr.static_sq_wq = 1;
578 sq_attr.non_wire = 1;
579 sq_attr.static_sq_wq = 1;
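/*
 * In normal (non-test) mode non_wire keeps the queue from emitting
 * anything to the wire; only the completions with timestamps are of
 * interest. static_sq_wq marks the WQE ring as static so the
 * pre-built WQEs can be executed by hardware repeatedly.
 */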
581 sq_attr.state = MLX5_SQC_STATE_RST;
582 sq_attr.cqn = wq->cq->id;
583 sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
584 sq_attr.wq_attr.cd_slave = 1;
585 sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
586 sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
587 sq_attr.wq_attr.pd = sh->pdn;
588 sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
589 sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
590 sq_attr.wq_attr.dbr_umem_valid = 1;
591 sq_attr.wq_attr.dbr_addr = umem_dbrec;
592 sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
593 sq_attr.wq_attr.wq_umem_valid = 1;
594 sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
595 /* umem_offset must be zero for static_sq_wq queue. */
596 sq_attr.wq_attr.wq_umem_offset = 0;
597 wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
600 DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
603 wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
604 MLX5_SND_DBR * sizeof(uint32_t));
605 /* Build the WQEs in the Send Queue before moving it to the Ready state. */
606 mlx5_txpp_fill_wqe_clock_queue(sh);
607 /* Change queue state to ready. */
608 msq_attr.sq_state = MLX5_SQC_STATE_RST;
609 msq_attr.state = MLX5_SQC_STATE_RDY;
611 ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
613 DRV_LOG(ERR, "Failed to set Clock Queue SQ to ready state.");
619 mlx5_txpp_destroy_clock_queue(sh);
624 /* Enable notification from the Rearm Queue CQ. */
626 mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
630 struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
631 uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
632 uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
633 uint64_t db_be = rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq->id);
634 base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
635 uint32_t *addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
637 rte_compiler_barrier();
638 aq->cq_dbrec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
641 *(uint64_t *)addr = db_be;
643 *(uint32_t *)addr = db_be;
645 *((uint32_t *)addr + 1) = db_be >> 32;
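/*
 * Without a native 64-bit store the arming value is written as two
 * 32-bit halves to consecutive words of the doorbell register, low
 * half first.
 */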
650 #if defined(RTE_ARCH_X86_64)
652 mlx5_atomic128_compare_exchange(rte_int128_t *dst,
654 const rte_int128_t *src)
658 asm volatile (MPLOCKED
661 : [dst] "=m" (dst->val[0]),
677 mlx5_atomic_read_cqe(rte_int128_t *from, rte_int128_t *ts)
680 * The only CQE of the Clock Queue is continuously updated
681 * by hardware at the specified rate. We have to read the
682 * timestamp and WQE completion index atomically.
684 #if defined(RTE_ARCH_X86_64)
687 memset(&src, 0, sizeof(src));
689 /* if (*from == *ts) *from = *src; else *ts = *from; */
690 mlx5_atomic128_compare_exchange(from, ts, &src);
692 uint64_t *cqe = (uint64_t *)from;
695 * Power architecture does not support 16B compare-and-swap.
696 * ARM implements it in software, so the code below is more suitable.
702 rte_compiler_barrier();
703 tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
704 op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
705 rte_compiler_barrier();
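/*
 * Read both 64-bit halves, then re-read them below; if either half
 * changed in between, hardware was updating the CQE and the read is
 * retried until a consistent 16-byte snapshot is captured.
 */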
706 if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
708 if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
718 /* Stores the timestamp in the cache structure to share data with the datapath. */
720 mlx5_txpp_cache_timestamp(struct mlx5_dev_ctx_shared *sh,
721 uint64_t ts, uint64_t ci)
723 ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
724 ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
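/*
 * ci_ts packs the completion index into the top MLX5_CQ_INDEX_WIDTH
 * bits and the truncated timestamp into the remaining low bits, so
 * the datapath can fetch both with a single 64-bit atomic load.
 */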
725 rte_compiler_barrier();
726 __atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
727 __atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
731 /* Reads timestamp from Clock Queue CQE and stores in the cache. */
733 mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)
735 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
736 struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
739 struct mlx5_cqe_ts cts;
744 static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
745 "Wrong timestamp CQE part size");
746 mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
747 if (to.cts.op_own >> 4) {
748 DRV_LOG(DEBUG, "Clock Queue error sync lost.");
749 __atomic_fetch_add(&sh->txpp.err_clock_queue,
750 1, __ATOMIC_RELAXED);
751 sh->txpp.sync_lost = 1;
754 ci = rte_be_to_cpu_16(to.cts.wqe_counter);
755 ts = rte_be_to_cpu_64(to.cts.timestamp);
756 ts = mlx5_txpp_convert_rx_ts(sh, ts);
757 wq->cq_ci += (ci - wq->sq_ci) & UINT16_MAX;
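/*
 * wqe_counter is only 16 bits wide, so the difference is taken
 * modulo 2^16 and accumulated into the full-width cq_ci counter.
 */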
759 mlx5_txpp_cache_timestamp(sh, ts, wq->cq_ci);
762 /* Waits for the first completion on the Clock Queue to initialize the timestamp. */
764 mlx5_txpp_init_timestamp(struct mlx5_dev_ctx_shared *sh)
766 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
771 for (wait = 0; wait < MLX5_TXPP_WAIT_INIT_TS; wait++) {
772 struct timespec onems;
774 mlx5_txpp_update_timestamp(sh);
777 /* Wait one millisecond and try again. */
779 onems.tv_nsec = NS_PER_S / MS_PER_S;
780 nanosleep(&onems, 0);
782 DRV_LOG(ERR, "Unable to initialize timestamp.");
783 sh->txpp.sync_lost = 1;
786 #ifdef HAVE_IBV_DEVX_EVENT
787 /* Gathers timestamp statistics from the Clock Queue CQE. */
789 mlx5_txpp_gather_timestamp(struct mlx5_dev_ctx_shared *sh)
791 /* Check whether we have a valid timestamp. */
792 if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
794 MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
795 __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
796 sh->txpp.ts.ts, __ATOMIC_RELAXED);
797 __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
798 sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
799 if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
801 if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
805 /* Handles Rearm Queue completions in periodic service. */
806 static __rte_always_inline void
807 mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
809 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
810 uint32_t cq_ci = wq->cq_ci;
815 volatile struct mlx5_cqe *cqe;
817 cqe = &wq->cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];
818 ret = check_cqe(cqe, MLX5_TXPP_REARM_CQ_SIZE, cq_ci);
820 case MLX5_CQE_STATUS_ERR:
824 case MLX5_CQE_STATUS_SW_OWN:
828 case MLX5_CQE_STATUS_HW_OWN:
834 } while (ret != MLX5_CQE_STATUS_HW_OWN);
835 if (likely(cq_ci != wq->cq_ci)) {
836 /* Check whether we have missed interrupts. */
837 if (cq_ci - wq->cq_ci != 1) {
838 DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
839 __atomic_fetch_add(&sh->txpp.err_miss_int,
840 1, __ATOMIC_RELAXED);
841 /* Check sync lost on wqe index. */
842 if (cq_ci - wq->cq_ci >=
843 (((1UL << MLX5_WQ_INDEX_WIDTH) /
844 MLX5_TXPP_REARM) - 1))
847 /* Update doorbell record to notify hardware. */
848 rte_compiler_barrier();
849 *wq->cq_dbrec = rte_cpu_to_be_32(cq_ci);
852 /* Fire new requests to Rearm Queue. */
854 DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
855 __atomic_fetch_add(&sh->txpp.err_rearm_queue,
856 1, __ATOMIC_RELAXED);
857 sh->txpp.sync_lost = 1;
862 /* Handles Clock Queue completions in periodic service. */
863 static __rte_always_inline void
864 mlx5_txpp_handle_clock_queue(struct mlx5_dev_ctx_shared *sh)
866 mlx5_txpp_update_timestamp(sh);
867 mlx5_txpp_gather_timestamp(sh);
871 /* Invoked periodically on Rearm Queue completions. */
873 mlx5_txpp_interrupt_handler(void *cb_arg)
875 #ifndef HAVE_IBV_DEVX_EVENT
876 RTE_SET_USED(cb_arg);
879 struct mlx5_dev_ctx_shared *sh = cb_arg;
881 struct mlx5dv_devx_async_event_hdr event_resp;
882 uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
885 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
886 /* Process events in the loop. Only rearm completions are expected. */
887 while (mlx5_glue->devx_get_event
891 (ssize_t)sizeof(out.event_resp.cookie)) {
892 mlx5_txpp_handle_rearm_queue(sh);
893 mlx5_txpp_handle_clock_queue(sh);
894 mlx5_txpp_cq_arm(sh);
895 mlx5_txpp_doorbell_rearm_queue
896 (sh, sh->txpp.rearm_queue.sq_ci - 1);
898 #endif /* HAVE_IBV_DEVX_EVENT */
902 mlx5_txpp_stop_service(struct mlx5_dev_ctx_shared *sh)
904 if (!sh->txpp.intr_handle.fd)
906 mlx5_intr_callback_unregister(&sh->txpp.intr_handle,
907 mlx5_txpp_interrupt_handler, sh);
908 sh->txpp.intr_handle.fd = 0;
911 /* Attaches the interrupt handler and fires the first request to the Rearm Queue. */
913 mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
915 uint16_t event_nums[1] = {0};
919 sh->txpp.err_miss_int = 0;
920 sh->txpp.err_rearm_queue = 0;
921 sh->txpp.err_clock_queue = 0;
922 sh->txpp.err_ts_past = 0;
923 sh->txpp.err_ts_future = 0;
924 /* Attach interrupt handler to process Rearm Queue completions. */
925 fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
926 ret = mlx5_os_set_nonblock_channel_fd(fd);
928 DRV_LOG(ERR, "Failed to change event channel FD.");
932 memset(&sh->txpp.intr_handle, 0, sizeof(sh->txpp.intr_handle));
933 fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
934 sh->txpp.intr_handle.fd = fd;
935 sh->txpp.intr_handle.type = RTE_INTR_HANDLE_EXT;
936 if (rte_intr_callback_register(&sh->txpp.intr_handle,
937 mlx5_txpp_interrupt_handler, sh)) {
938 sh->txpp.intr_handle.fd = 0;
939 DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
942 /* Subscribe CQ event to the event channel controlled by the driver. */
943 ret = mlx5_glue->devx_subscribe_devx_event(sh->txpp.echan,
944 sh->txpp.rearm_queue.cq->obj,
948 DRV_LOG(ERR, "Failed to subscribe CQE event.");
952 /* Enable interrupts in the CQ. */
953 mlx5_txpp_cq_arm(sh);
954 /* Fire the first request on Rearm Queue. */
955 mlx5_txpp_doorbell_rearm_queue(sh, sh->txpp.rearm_queue.sq_size - 1);
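/*
 * Ringing the doorbell with index (sq_size - 1) hands the whole
 * pre-built WQE ring to hardware at once; subsequent doorbells are
 * issued from the interrupt handler on Rearm Queue completions.
 */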
956 mlx5_txpp_init_timestamp(sh);
961 * The routine initializes the packet pacing infrastructure:
962 * - allocates PP context
965 * - attaches rearm interrupt handler
966 * - starts Clock Queue
968 * Returns 0 on success, negative otherwise
971 mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
973 int tx_pp = priv->config.tx_pp;
976 /* Store the requested pacing parameters. */
977 sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
978 sh->txpp.test = !!(tx_pp < 0);
979 sh->txpp.skew = priv->config.tx_skew;
980 sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
981 ret = mlx5_txpp_create_event_channel(sh);
984 ret = mlx5_txpp_alloc_pp_index(sh);
987 ret = mlx5_txpp_create_clock_queue(sh);
990 ret = mlx5_txpp_create_rearm_queue(sh);
993 ret = mlx5_txpp_start_service(sh);
998 mlx5_txpp_stop_service(sh);
999 mlx5_txpp_destroy_rearm_queue(sh);
1000 mlx5_txpp_destroy_clock_queue(sh);
1001 mlx5_txpp_free_pp_index(sh);
1002 mlx5_txpp_destroy_event_channel(sh);
1011 * The routine destroys the packet pacing infrastructure:
1012 * - detaches rearm interrupt handler
1018 mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
1020 mlx5_txpp_stop_service(sh);
1021 mlx5_txpp_destroy_rearm_queue(sh);
1022 mlx5_txpp_destroy_clock_queue(sh);
1023 mlx5_txpp_free_pp_index(sh);
1024 mlx5_txpp_destroy_event_channel(sh);
1031 * Creates and starts packet pacing infrastructure on specified device.
1034 * Pointer to Ethernet device structure.
1037 * 0 on success, a negative errno value otherwise and rte_errno is set.
1040 mlx5_txpp_start(struct rte_eth_dev *dev)
1042 struct mlx5_priv *priv = dev->data->dev_private;
1043 struct mlx5_dev_ctx_shared *sh = priv->sh;
1047 if (!priv->config.tx_pp) {
1048 /* Packet pacing is not requested for the device. */
1049 MLX5_ASSERT(priv->txpp_en == 0);
1052 if (priv->txpp_en) {
1053 /* Packet pacing is already enabled for the device. */
1054 MLX5_ASSERT(sh->txpp.refcnt);
1057 if (priv->config.tx_pp > 0) {
1058 ret = rte_mbuf_dynflag_lookup
1059 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
1063 ret = pthread_mutex_lock(&sh->txpp.mutex);
1066 if (sh->txpp.refcnt) {
1070 err = mlx5_txpp_create(sh, priv);
1072 MLX5_ASSERT(sh->txpp.tick);
1074 sh->txpp.refcnt = 1;
1079 ret = pthread_mutex_unlock(&sh->txpp.mutex);
1086 * Stops and destroys packet pacing infrastructure on specified device.
1089 * Pointer to Ethernet device structure.
1092 * 0 on success, a negative errno value otherwise and rte_errno is set.
1095 mlx5_txpp_stop(struct rte_eth_dev *dev)
1097 struct mlx5_priv *priv = dev->data->dev_private;
1098 struct mlx5_dev_ctx_shared *sh = priv->sh;
1101 if (!priv->txpp_en) {
1102 /* Packet pacing is already disabled for the device. */
1106 ret = pthread_mutex_lock(&sh->txpp.mutex);
1109 MLX5_ASSERT(sh->txpp.refcnt);
1110 if (!sh->txpp.refcnt || --sh->txpp.refcnt)
1112 /* No more references, do the actual destroy. */
1113 mlx5_txpp_destroy(sh);
1114 ret = pthread_mutex_unlock(&sh->txpp.mutex);
1120 * Read the current clock counter of an Ethernet device
1122 * This returns the current raw clock value of an Ethernet device. It is
1123 * a raw number of ticks, with no given time reference.
1124 * The value returned here is from the same clock as the one
1125 * filling the timestamp field of Rx/Tx packets when using hardware timestamp
1126 * offload. Therefore it can be used to compute a precise conversion of
1127 * the device clock to real time.
1130 * Pointer to Ethernet device structure.
1132 * Pointer to the uint64_t that holds the raw clock value.
1136 * - -ENOTSUP: The function is not supported in this mode. Requires
1137 * packet pacing module configured and started (tx_pp devarg)
1140 mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp)
1142 struct mlx5_priv *priv = dev->data->dev_private;
1143 struct mlx5_dev_ctx_shared *sh = priv->sh;
1146 if (sh->txpp.refcnt) {
1147 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
1148 struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
1151 struct mlx5_cqe_ts cts;
1155 mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
1156 if (to.cts.op_own >> 4) {
1157 DRV_LOG(DEBUG, "Clock Queue error sync lost.");
1158 __atomic_fetch_add(&sh->txpp.err_clock_queue,
1159 1, __ATOMIC_RELAXED);
1160 sh->txpp.sync_lost = 1;
1163 ts = rte_be_to_cpu_64(to.cts.timestamp);
1164 ts = mlx5_txpp_convert_rx_ts(sh, ts);
1168 /* Not supported in isolated mode - kernel does not see the CQEs. */
1169 if (priv->isolated || rte_eal_process_type() != RTE_PROC_PRIMARY)
1171 ret = mlx5_read_clock(dev, timestamp);
1176 * DPDK callback to clear device extended statistics.
1179 * Pointer to Ethernet device structure.
1182 * 0 on success and stats is reset, negative errno value otherwise and
1185 int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
1187 struct mlx5_priv *priv = dev->data->dev_private;
1188 struct mlx5_dev_ctx_shared *sh = priv->sh;
1190 __atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
1191 __atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
1192 __atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
1193 __atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
1194 __atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
1199 * Routine to retrieve names of extended device statistics
1200 * for packet send scheduling. It appends the specific stats names
1201 * after the parts filled by preceding modules (eth stats, etc.)
1204 * Pointer to Ethernet device structure.
1205 * @param[out] xstats_names
1206 * Buffer to insert names into.
1210 * Number of names filled by preceding statistics modules.
1213 * Number of xstats names.
1215 int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1216 struct rte_eth_xstat_name *xstats_names,
1217 unsigned int n, unsigned int n_used)
1219 unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
1222 if (n >= n_used + n_txpp && xstats_names) {
1223 for (i = 0; i < n_txpp; ++i) {
1224 strncpy(xstats_names[i + n_used].name,
1225 mlx5_txpp_stat_names[i],
1226 RTE_ETH_XSTATS_NAME_SIZE);
1227 xstats_names[i + n_used].name
1228 [RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
1231 return n_used + n_txpp;
1235 mlx5_txpp_read_tsa(struct mlx5_dev_txpp *txpp,
1236 struct mlx5_txpp_ts *tsa, uint16_t idx)
1241 ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
1242 ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
1243 rte_compiler_barrier();
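/*
 * The checks below validate the snapshot: the low bits of ci_ts must
 * equal the corresponding bits of ts (both are written from the same
 * timestamp), and both fields must read back unchanged; otherwise
 * the service thread was updating the entry concurrently and the
 * read is retried.
 */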
1244 if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
1246 if (__atomic_load_n(&txpp->tsa[idx].ts,
1247 __ATOMIC_RELAXED) != ts)
1249 if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
1250 __ATOMIC_RELAXED) != ci)
1259 * Jitter reflects the clock change between
1260 * neighbouring Clock Queue completions.
1263 mlx5_txpp_xstats_jitter(struct mlx5_dev_txpp *txpp)
1265 struct mlx5_txpp_ts tsa0, tsa1;
1269 if (txpp->ts_n < 2) {
1270 /* Not enough reports gathered yet. */
1277 rte_compiler_barrier();
1280 ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
1283 ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
1284 mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
1285 mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
1286 rte_compiler_barrier();
1287 } while (ts_p != txpp->ts_p);
1288 /* We have two neighbor reports, calculate the jitter. */
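/*
 * dts is the measured timestamp delta between the two completions;
 * dci is the completion index delta, converted to the same time
 * units via the Clock Queue tick, giving the expected delta. The
 * jitter is the absolute difference between the two.
 */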
1289 dts = tsa1.ts - tsa0.ts;
1290 dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
1291 (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
1293 dci += 1 << MLX5_CQ_INDEX_WIDTH;
1295 return (dts > dci) ? dts - dci : dci - dts;
1299 * Wander reflects the long-term clock change
1300 * over the entire length of all Clock Queue completions.
1303 mlx5_txpp_xstats_wander(struct mlx5_dev_txpp *txpp)
1305 struct mlx5_txpp_ts tsa0, tsa1;
1309 if (txpp->ts_n < MLX5_TXPP_REARM_SQ_SIZE) {
1310 /* Not enough reports gathered yet. */
1317 rte_compiler_barrier();
1318 ts_0 = ts_p - MLX5_TXPP_REARM_SQ_SIZE / 2 - 1;
1320 ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
1323 ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
1324 mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
1325 mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
1326 rte_compiler_barrier();
1327 } while (ts_p != txpp->ts_p);
1328 /* We have two distant reports, calculate the wander. */
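/*
 * Same computation as for the jitter, but the two reports are taken
 * half of the timestamp ring apart, so the result reflects the
 * accumulated drift over many completions.
 */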
1329 dts = tsa1.ts - tsa0.ts;
1330 dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
1331 (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
1332 dci += 1 << MLX5_CQ_INDEX_WIDTH;
1334 return (dts > dci) ? dts - dci : dci - dts;
1338 * Routine to retrieve extended device statistics
1339 * for packet send scheduling. It appends the specific statistics
1340 * after the parts filled by preceding modules (eth stats, etc.)
1343 * Pointer to Ethernet device.
1345 * Pointer to rte extended stats table.
1347 * The size of the stats table.
1349 * Number of stats filled by preceding statistics modules.
1352 * Number of extended stats on success and stats is filled,
1353 * negative on error and rte_errno is set.
1356 mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
1357 struct rte_eth_xstat *stats,
1358 unsigned int n, unsigned int n_used)
1360 unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
1362 if (n >= n_used + n_txpp && stats) {
1363 struct mlx5_priv *priv = dev->data->dev_private;
1364 struct mlx5_dev_ctx_shared *sh = priv->sh;
1367 for (i = 0; i < n_txpp; ++i)
1368 stats[n_used + i].id = n_used + i;
1369 stats[n_used + 0].value =
1370 __atomic_load_n(&sh->txpp.err_miss_int,
1372 stats[n_used + 1].value =
1373 __atomic_load_n(&sh->txpp.err_rearm_queue,
1375 stats[n_used + 2].value =
1376 __atomic_load_n(&sh->txpp.err_clock_queue,
1378 stats[n_used + 3].value =
1379 __atomic_load_n(&sh->txpp.err_ts_past,
1381 stats[n_used + 4].value =
1382 __atomic_load_n(&sh->txpp.err_ts_future,
1384 stats[n_used + 5].value = mlx5_txpp_xstats_jitter(&sh->txpp);
1385 stats[n_used + 6].value = mlx5_txpp_xstats_wander(&sh->txpp);
1386 stats[n_used + 7].value = sh->txpp.sync_lost;
1388 return n_used + n_txpp;