1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
8 #include <rte_ethdev_driver.h>
9 #include <rte_interrupts.h>
10 #include <rte_alarm.h>
11 #include <rte_malloc.h>
12 #include <rte_cycles.h>
13 #include <rte_eal_paging.h>
15 #include <mlx5_malloc.h>
16 #include <mlx5_common_devx.h>
19 #include "mlx5_rxtx.h"
20 #include "mlx5_common_os.h"
22 static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
23 "Wrong timestamp CQE part size");
25 static const char * const mlx5_txpp_stat_names[] = {
26 "tx_pp_missed_interrupt_errors", /* Missed service interrupt. */
27 "tx_pp_rearm_queue_errors", /* Rearm Queue errors. */
28 "tx_pp_clock_queue_errors", /* Clock Queue errors. */
29 "tx_pp_timestamp_past_errors", /* Timestamp in the past. */
30 "tx_pp_timestamp_future_errors", /* Timestamp in the distant future. */
31 "tx_pp_jitter", /* Timestamp jitter (one Clock Queue completion). */
32 "tx_pp_wander", /* Timestamp wander (half of Clock Queue CQEs). */
33 "tx_pp_sync_lost", /* Scheduling synchronization lost. */
36 /* Destroy Event Queue Notification Channel. */
38 mlx5_txpp_destroy_event_channel(struct mlx5_dev_ctx_shared *sh)
41 mlx5_os_devx_destroy_event_channel(sh->txpp.echan);
42 sh->txpp.echan = NULL;
46 /* Create Event Queue Notification Channel. */
48 mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
50 MLX5_ASSERT(!sh->txpp.echan);
51 sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->ctx,
52 MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
53 if (!sh->txpp.echan) {
55 DRV_LOG(ERR, "Failed to create event channel %d.", rte_errno);
62 mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
64 #ifdef HAVE_MLX5DV_PP_ALLOC
66 mlx5_glue->dv_free_pp(sh->txpp.pp);
72 DRV_LOG(ERR, "Freeing pacing index is not supported.");
76 /* Allocate Packet Pacing index from kernel via mlx5dv call. */
78 mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
80 #ifdef HAVE_MLX5DV_PP_ALLOC
81 uint32_t pp[MLX5_ST_SZ_DW(set_pp_rate_limit_context)];
84 MLX5_ASSERT(!sh->txpp.pp);
85 memset(&pp, 0, sizeof(pp));
86 rate = NS_PER_S / sh->txpp.tick;
87 if (rate * sh->txpp.tick != NS_PER_S)
88 DRV_LOG(WARNING, "Packet pacing frequency is not precise.");
92 len = RTE_MAX(MLX5_TXPP_TEST_PKT_SIZE,
93 (size_t)RTE_ETHER_MIN_LEN);
94 MLX5_SET(set_pp_rate_limit_context, &pp,
95 burst_upper_bound, len);
96 MLX5_SET(set_pp_rate_limit_context, &pp,
97 typical_packet_size, len);
98 /* Convert packets per second into kilobits per second. */
99 rate = (rate * len) / (1000ul / CHAR_BIT);
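/*
 * Worked example (illustrative): assuming a 1 usec tick and a 64-byte
 * test packet, rate = 10^9 / 1000 = 1,000,000 packets/sec, and the
 * conversion above yields 1,000,000 * 64 / 125 = 512,000 kbit/s
 * (1 kbit = 125 bytes).
 */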
100 DRV_LOG(INFO, "Packet pacing rate set to %" PRIu64, rate);
102 MLX5_SET(set_pp_rate_limit_context, &pp, rate_limit, rate);
103 MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
104 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
105 sh->txpp.pp = mlx5_glue->dv_alloc_pp
106 (sh->ctx, sizeof(pp), &pp,
107 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
108 if (sh->txpp.pp == NULL) {
109 DRV_LOG(ERR, "Failed to allocate packet pacing index.");
113 if (!((struct mlx5dv_pp *)sh->txpp.pp)->index) {
114 DRV_LOG(ERR, "Zero packet pacing index allocated.");
115 mlx5_txpp_free_pp_index(sh);
119 sh->txpp.pp_id = ((struct mlx5dv_pp *)(sh->txpp.pp))->index;
123 DRV_LOG(ERR, "Allocating pacing index is not supported.");
130 mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
133 claim_zero(mlx5_devx_cmd_destroy(wq->sq));
135 claim_zero(mlx5_os_umem_dereg(wq->sq_umem));
137 mlx5_free((void *)(uintptr_t)wq->sq_buf);
138 mlx5_devx_cq_destroy(&wq->cq_obj);
139 memset(wq, 0, sizeof(*wq));
143 mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
145 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
147 mlx5_txpp_destroy_send_queue(wq);
151 mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
153 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
155 mlx5_txpp_destroy_send_queue(wq);
157 mlx5_free(sh->txpp.tsa);
163 mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
165 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
173 cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
174 (wq->wqes[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);
175 cs.w32[1] = wq->wqes[ci & (wq->sq_size - 1)].ctrl[1];
176 /* Update SQ doorbell record with new SQ ci. */
177 rte_compiler_barrier();
178 *wq->sq_dbrec = rte_cpu_to_be_32(wq->sq_ci);
179 /* Make sure the doorbell record is updated. */
181 /* Write to the doorbell register to start processing. */
182 reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
183 __mlx5_uar_write64_relaxed(cs.w64, reg_addr, NULL);
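/*
 * In short (informational): the SQ doorbell record is updated with the
 * new wq->sq_ci first, then the 8-byte control-segment header of the
 * armed WQE, with its index field patched to ci - 1, is written to the
 * UAR register to kick the hardware.
 */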
188 mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
190 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
191 struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
194 for (i = 0; i < wq->sq_size; i += 2) {
195 struct mlx5_wqe_cseg *cs;
196 struct mlx5_wqe_qseg *qs;
199 /* Build SEND_EN request with slave WQE index. */
200 cs = &wqe[i + 0].cseg;
201 cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
202 cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
203 cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
204 MLX5_COMP_MODE_OFFSET);
205 cs->misc = RTE_BE32(0);
206 qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
207 index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
208 ((1 << MLX5_WQ_INDEX_WIDTH) - 1);
209 qs->max_index = rte_cpu_to_be_32(index);
210 qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.sq->id);
211 /* Build WAIT request with slave CQE index. */
212 cs = &wqe[i + 1].cseg;
213 cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
214 cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
215 cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
216 MLX5_COMP_MODE_OFFSET);
217 cs->misc = RTE_BE32(0);
218 qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
219 index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
220 ((1 << MLX5_CQ_INDEX_WIDTH) - 1);
221 qs->max_index = rte_cpu_to_be_32(index);
223 rte_cpu_to_be_32(sh->txpp.clock_queue.cq_obj.cq->id);
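/*
 * Illustrative summary of the pattern built above (a reading of the code,
 * not normative): the Rearm Queue holds pairs of WQEs. Each SEND_EN
 * releases the next window of Clock Queue (slave) WQEs up to the computed
 * SQ index, and the paired WAIT stalls the Rearm Queue until the Clock
 * Queue CQ reaches the computed CQE index, so Clock Queue slots keep
 * being released at the pace of its own completions.
 */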
227 /* Creates the Rearm Queue to fire the requests to the Clock Queue in real time. */
229 mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
231 struct mlx5_devx_create_sq_attr sq_attr = { 0 };
232 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
233 struct mlx5_devx_cq_attr cq_attr = {
234 .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
236 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
238 uint32_t umem_size, umem_dbrec;
241 page_size = rte_mem_page_size();
242 if (page_size == (size_t)-1) {
243 DRV_LOG(ERR, "Failed to get mem page size");
246 /* Create completion queue object for Rearm Queue. */
247 ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
248 log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,
251 DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
257 * Allocate memory buffer for Send Queue WQEs.
258 * There should be no WQE leftovers in the cyclic queue.
260 wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
261 MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
262 umem_size = MLX5_WQE_SIZE * wq->sq_size;
263 umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
264 umem_size += MLX5_DBR_SIZE;
265 wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
266 page_size, sh->numa_node);
268 DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
272 /* Register allocated buffer in user space with DevX. */
273 wq->sq_umem = mlx5_os_umem_reg(sh->ctx,
274 (void *)(uintptr_t)wq->sq_buf,
276 IBV_ACCESS_LOCAL_WRITE);
279 DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
282 /* Create send queue object for Rearm Queue. */
283 sq_attr.state = MLX5_SQC_STATE_RST;
284 sq_attr.tis_lst_sz = 1;
285 sq_attr.tis_num = sh->tis->id;
286 sq_attr.cqn = wq->cq_obj.cq->id;
287 sq_attr.cd_master = 1;
288 sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
289 sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
290 sq_attr.wq_attr.pd = sh->pdn;
291 sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
292 sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
293 sq_attr.wq_attr.dbr_umem_valid = 1;
294 sq_attr.wq_attr.dbr_addr = umem_dbrec;
295 sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
296 sq_attr.wq_attr.wq_umem_valid = 1;
297 sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
298 sq_attr.wq_attr.wq_umem_offset = 0;
299 wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
302 DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
305 wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
306 MLX5_SND_DBR * sizeof(uint32_t));
307 /* Build the WQEs in the Send Queue before going to the Ready state. */
308 mlx5_txpp_fill_wqe_rearm_queue(sh);
309 /* Change queue state to ready. */
310 msq_attr.sq_state = MLX5_SQC_STATE_RST;
311 msq_attr.state = MLX5_SQC_STATE_RDY;
312 ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
314 DRV_LOG(ERR, "Failed to set SQ ready state Rearm Queue.");
320 mlx5_txpp_destroy_rearm_queue(sh);
326 mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
328 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
329 struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
330 struct mlx5_wqe_cseg *cs = &wqe->cseg;
331 uint32_t wqe_size, opcode, i;
334 /* For test purposes fill the WQ with an inline SEND packet. */
336 wqe_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
338 2 * MLX5_WQE_ESEG_SIZE -
339 MLX5_ESEG_MIN_INLINE_SIZE,
341 opcode = MLX5_OPCODE_SEND;
343 wqe_size = MLX5_WSEG_SIZE;
344 opcode = MLX5_OPCODE_NOP;
346 cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
347 cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) |
348 (wqe_size / MLX5_WSEG_SIZE));
349 cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
350 cs->misc = RTE_BE32(0);
351 wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);
353 struct mlx5_wqe_eseg *es = &wqe->eseg;
354 struct rte_ether_hdr *eth_hdr;
355 struct rte_ipv4_hdr *ip_hdr;
356 struct rte_udp_hdr *udp_hdr;
358 /* Build the inline test packet pattern. */
359 MLX5_ASSERT(wqe_size <= MLX5_WQE_SIZE_MAX);
360 MLX5_ASSERT(MLX5_TXPP_TEST_PKT_SIZE >=
361 (sizeof(struct rte_ether_hdr) +
362 sizeof(struct rte_ipv4_hdr)));
364 es->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
369 es->inline_hdr_sz = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE);
370 /* Build test packet L2 header (Ethernet). */
371 dst = (uint8_t *)&es->inline_data;
372 eth_hdr = (struct rte_ether_hdr *)dst;
373 rte_eth_random_addr(&eth_hdr->d_addr.addr_bytes[0]);
374 rte_eth_random_addr(&eth_hdr->s_addr.addr_bytes[0]);
375 eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
376 /* Build test packet L3 header (IP v4). */
377 dst += sizeof(struct rte_ether_hdr);
378 ip_hdr = (struct rte_ipv4_hdr *)dst;
379 ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
380 ip_hdr->type_of_service = 0;
381 ip_hdr->fragment_offset = 0;
382 ip_hdr->time_to_live = 64;
383 ip_hdr->next_proto_id = IPPROTO_UDP;
384 ip_hdr->packet_id = 0;
385 ip_hdr->total_length = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
386 sizeof(struct rte_ether_hdr));
387 /* Use the RFC 5735 / RFC 2544 reserved network test addresses. */
388 ip_hdr->src_addr = RTE_BE32((198U << 24) | (18 << 16) |
390 ip_hdr->dst_addr = RTE_BE32((198U << 24) | (18 << 16) |
392 if (MLX5_TXPP_TEST_PKT_SIZE <
393 (sizeof(struct rte_ether_hdr) +
394 sizeof(struct rte_ipv4_hdr) +
395 sizeof(struct rte_udp_hdr)))
397 /* Build test packet L4 header (UDP). */
398 dst += sizeof(struct rte_ipv4_hdr);
399 udp_hdr = (struct rte_udp_hdr *)dst;
400 udp_hdr->src_port = RTE_BE16(9); /* RFC863 Discard. */
401 udp_hdr->dst_port = RTE_BE16(9);
402 udp_hdr->dgram_len = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
403 sizeof(struct rte_ether_hdr) -
404 sizeof(struct rte_ipv4_hdr));
405 udp_hdr->dgram_cksum = 0;
406 /* Fill the test packet data. */
407 dst += sizeof(struct rte_udp_hdr);
408 for (i = sizeof(struct rte_ether_hdr) +
409 sizeof(struct rte_ipv4_hdr) +
410 sizeof(struct rte_udp_hdr);
411 i < MLX5_TXPP_TEST_PKT_SIZE; i++)
412 *dst++ = (uint8_t)(i & 0xFF);
415 /* Duplicate the pattern to the next WQEs. */
416 dst = (uint8_t *)(uintptr_t)wq->sq_buf;
417 for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
419 rte_memcpy(dst, (void *)(uintptr_t)wq->sq_buf, wqe_size);
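/*
 * Resulting test packet layout (for reference): Ethernet header with
 * random MAC addresses, IPv4 header using the RFC 2544 198.18.0.0/15
 * benchmarking addresses, UDP header with source/destination port 9
 * (discard), then an incrementing byte pattern padding the frame up to
 * MLX5_TXPP_TEST_PKT_SIZE. The same WQE contents are then replicated to
 * every Clock Queue slot.
 */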
423 /* Creates the Clock Queue for packet pacing, returns zero on success. */
425 mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
427 struct mlx5_devx_create_sq_attr sq_attr = { 0 };
428 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
429 struct mlx5_devx_cq_attr cq_attr = {
432 .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
434 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
436 uint32_t umem_size, umem_dbrec;
439 page_size = rte_mem_page_size();
440 if (page_size == (size_t)-1) {
441 DRV_LOG(ERR, "Failed to get mem page size");
444 sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
445 MLX5_TXPP_REARM_SQ_SIZE *
446 sizeof(struct mlx5_txpp_ts),
449 DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
454 /* Create completion queue object for Clock Queue. */
455 ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
456 log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,
459 DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
463 /* Allocate memory buffer for Send Queue WQEs. */
465 wq->sq_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
467 2 * MLX5_WQE_ESEG_SIZE -
468 MLX5_ESEG_MIN_INLINE_SIZE,
469 MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
470 wq->sq_size *= MLX5_TXPP_CLKQ_SIZE;
472 wq->sq_size = MLX5_TXPP_CLKQ_SIZE;
474 /* There should not be WQE leftovers in the cyclic queue. */
475 MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
476 umem_size = MLX5_WQE_SIZE * wq->sq_size;
477 umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
478 umem_size += MLX5_DBR_SIZE;
479 wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
480 page_size, sh->numa_node);
482 DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
486 /* Register allocated buffer in user space with DevX. */
487 wq->sq_umem = mlx5_os_umem_reg(sh->ctx,
488 (void *)(uintptr_t)wq->sq_buf,
490 IBV_ACCESS_LOCAL_WRITE);
493 DRV_LOG(ERR, "Failed to register umem for Clock Queue.");
496 /* Create send queue object for Clock Queue. */
498 sq_attr.tis_lst_sz = 1;
499 sq_attr.tis_num = sh->tis->id;
500 sq_attr.non_wire = 0;
501 sq_attr.static_sq_wq = 1;
503 sq_attr.non_wire = 1;
504 sq_attr.static_sq_wq = 1;
506 sq_attr.state = MLX5_SQC_STATE_RST;
507 sq_attr.cqn = wq->cq_obj.cq->id;
508 sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
509 sq_attr.wq_attr.cd_slave = 1;
510 sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
511 sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
512 sq_attr.wq_attr.pd = sh->pdn;
513 sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
514 sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
515 sq_attr.wq_attr.dbr_umem_valid = 1;
516 sq_attr.wq_attr.dbr_addr = umem_dbrec;
517 sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
518 sq_attr.wq_attr.wq_umem_valid = 1;
519 sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
520 /* umem_offset must be zero for static_sq_wq queue. */
521 sq_attr.wq_attr.wq_umem_offset = 0;
522 wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
525 DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
528 wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
529 MLX5_SND_DBR * sizeof(uint32_t));
530 /* Build the WQEs in the Send Queue before going to the Ready state. */
531 mlx5_txpp_fill_wqe_clock_queue(sh);
532 /* Change queue state to ready. */
533 msq_attr.sq_state = MLX5_SQC_STATE_RST;
534 msq_attr.state = MLX5_SQC_STATE_RDY;
536 ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
538 DRV_LOG(ERR, "Failed to set SQ ready state Clock Queue.");
544 mlx5_txpp_destroy_clock_queue(sh);
549 /* Enable notification from the Rearm Queue CQ. */
551 mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
555 struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
556 uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
557 uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
559 rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq_obj.cq->id);
560 base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
561 uint32_t *addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
563 rte_compiler_barrier();
564 aq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
567 *(uint64_t *)addr = db_be;
569 *(uint32_t *)addr = db_be;
571 *((uint32_t *)addr + 1) = db_be >> 32;
576 #if defined(RTE_ARCH_X86_64)
578 mlx5_atomic128_compare_exchange(rte_int128_t *dst,
580 const rte_int128_t *src)
584 asm volatile (MPLOCKED
587 : [dst] "=m" (dst->val[0]),
603 mlx5_atomic_read_cqe(rte_int128_t *from, rte_int128_t *ts)
606 * The only CQE of the Clock Queue is continuously updated by
607 * the hardware at the specified rate. We have to read the
608 * timestamp and the WQE completion index atomically.
610 #if defined(RTE_ARCH_X86_64)
613 memset(&src, 0, sizeof(src));
615 /* if (*from == *ts) *from = *src else *ts = *from; */
616 mlx5_atomic128_compare_exchange(from, ts, &src);
618 uint64_t *cqe = (uint64_t *)from;
621 * The Power architecture does not support 16B compare-and-swap.
622 * ARM implements it in software, so the read-and-recheck loop below is preferred.
628 rte_compiler_barrier();
629 tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
630 op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
631 rte_compiler_barrier();
632 if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
634 if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
644 /* Stores the timestamp in the cache structure shared with the datapath. */
646 mlx5_txpp_cache_timestamp(struct mlx5_dev_ctx_shared *sh,
647 uint64_t ts, uint64_t ci)
649 ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
650 ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
651 rte_compiler_barrier();
652 __atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
653 __atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
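/*
 * Packing sketch (informational): ci_ts keeps the completion index in the
 * top MLX5_CQ_INDEX_WIDTH bits and the timestamp, with its top
 * MLX5_CQ_INDEX_WIDTH bits dropped, in the remaining low bits:
 *
 *	ci_ts = (ci << (64 - MLX5_CQ_INDEX_WIDTH)) |
 *		((ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH);
 *
 * Readers such as mlx5_txpp_read_tsa() can therefore cross-check the low
 * bits of ci_ts against ts to detect a torn pair of stores.
 */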
657 /* Reads timestamp from Clock Queue CQE and stores in the cache. */
659 mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)
661 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
662 struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
665 struct mlx5_cqe_ts cts;
670 mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
671 if (to.cts.op_own >> 4) {
672 DRV_LOG(DEBUG, "Clock Queue error sync lost.");
673 __atomic_fetch_add(&sh->txpp.err_clock_queue,
674 1, __ATOMIC_RELAXED);
675 sh->txpp.sync_lost = 1;
678 ci = rte_be_to_cpu_16(to.cts.wqe_counter);
679 ts = rte_be_to_cpu_64(to.cts.timestamp);
680 ts = mlx5_txpp_convert_rx_ts(sh, ts);
681 wq->cq_ci += (ci - wq->sq_ci) & UINT16_MAX;
683 mlx5_txpp_cache_timestamp(sh, ts, wq->cq_ci);
686 /* Waits for the first completion on Clock Queue to init timestamp. */
688 mlx5_txpp_init_timestamp(struct mlx5_dev_ctx_shared *sh)
690 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
695 for (wait = 0; wait < MLX5_TXPP_WAIT_INIT_TS; wait++) {
696 mlx5_txpp_update_timestamp(sh);
699 /* Wait one millisecond and try again. */
700 rte_delay_us_sleep(US_PER_S / MS_PER_S);
702 DRV_LOG(ERR, "Unable to initialize timestamp.");
703 sh->txpp.sync_lost = 1;
706 #ifdef HAVE_IBV_DEVX_EVENT
707 /* Gather statistics for timestamp from Clock Queue CQE. */
709 mlx5_txpp_gather_timestamp(struct mlx5_dev_ctx_shared *sh)
711 /* Check whether we have a valid timestamp. */
712 if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
714 MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
715 __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
716 sh->txpp.ts.ts, __ATOMIC_RELAXED);
717 __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
718 sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
719 if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
721 if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
725 /* Handles Rearm Queue completions in periodic service. */
726 static __rte_always_inline void
727 mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
729 struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
730 uint32_t cq_ci = wq->cq_ci;
735 volatile struct mlx5_cqe *cqe;
737 cqe = &wq->cq_obj.cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];
738 ret = check_cqe(cqe, MLX5_TXPP_REARM_CQ_SIZE, cq_ci);
740 case MLX5_CQE_STATUS_ERR:
744 case MLX5_CQE_STATUS_SW_OWN:
748 case MLX5_CQE_STATUS_HW_OWN:
754 } while (ret != MLX5_CQE_STATUS_HW_OWN);
755 if (likely(cq_ci != wq->cq_ci)) {
756 /* Check whether we have missed interrupts. */
757 if (cq_ci - wq->cq_ci != 1) {
758 DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
759 __atomic_fetch_add(&sh->txpp.err_miss_int,
760 1, __ATOMIC_RELAXED);
761 /* Check whether synchronization was lost on the WQE index. */
762 if (cq_ci - wq->cq_ci >=
763 (((1UL << MLX5_WQ_INDEX_WIDTH) /
764 MLX5_TXPP_REARM) - 1))
767 /* Update doorbell record to notify hardware. */
768 rte_compiler_barrier();
769 *wq->cq_obj.db_rec = rte_cpu_to_be_32(cq_ci);
772 /* Fire new requests to Rearm Queue. */
774 DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
775 __atomic_fetch_add(&sh->txpp.err_rearm_queue,
776 1, __ATOMIC_RELAXED);
777 sh->txpp.sync_lost = 1;
782 /* Handles Clock Queue completions in periodic service. */
783 static __rte_always_inline void
784 mlx5_txpp_handle_clock_queue(struct mlx5_dev_ctx_shared *sh)
786 mlx5_txpp_update_timestamp(sh);
787 mlx5_txpp_gather_timestamp(sh);
791 /* Invoked periodically on Rearm Queue completions. */
793 mlx5_txpp_interrupt_handler(void *cb_arg)
795 #ifndef HAVE_IBV_DEVX_EVENT
796 RTE_SET_USED(cb_arg);
799 struct mlx5_dev_ctx_shared *sh = cb_arg;
801 struct mlx5dv_devx_async_event_hdr event_resp;
802 uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
805 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
806 /* Process events in the loop. Only rearm completions are expected. */
807 while (mlx5_glue->devx_get_event
811 (ssize_t)sizeof(out.event_resp.cookie)) {
812 mlx5_txpp_handle_rearm_queue(sh);
813 mlx5_txpp_handle_clock_queue(sh);
814 mlx5_txpp_cq_arm(sh);
815 mlx5_txpp_doorbell_rearm_queue
816 (sh, sh->txpp.rearm_queue.sq_ci - 1);
818 #endif /* HAVE_IBV_DEVX_EVENT */
822 mlx5_txpp_stop_service(struct mlx5_dev_ctx_shared *sh)
824 if (!sh->txpp.intr_handle.fd)
826 mlx5_intr_callback_unregister(&sh->txpp.intr_handle,
827 mlx5_txpp_interrupt_handler, sh);
828 sh->txpp.intr_handle.fd = 0;
831 /* Attaches the interrupt handler and fires the first request to the Rearm Queue. */
833 mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
835 uint16_t event_nums[1] = {0};
839 sh->txpp.err_miss_int = 0;
840 sh->txpp.err_rearm_queue = 0;
841 sh->txpp.err_clock_queue = 0;
842 sh->txpp.err_ts_past = 0;
843 sh->txpp.err_ts_future = 0;
844 /* Attach interrupt handler to process Rearm Queue completions. */
845 fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
846 ret = mlx5_os_set_nonblock_channel_fd(fd);
848 DRV_LOG(ERR, "Failed to change event channel FD.");
852 memset(&sh->txpp.intr_handle, 0, sizeof(sh->txpp.intr_handle));
853 fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
854 sh->txpp.intr_handle.fd = fd;
855 sh->txpp.intr_handle.type = RTE_INTR_HANDLE_EXT;
856 if (rte_intr_callback_register(&sh->txpp.intr_handle,
857 mlx5_txpp_interrupt_handler, sh)) {
858 sh->txpp.intr_handle.fd = 0;
859 DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
862 /* Subscribe CQ event to the event channel controlled by the driver. */
863 ret = mlx5_os_devx_subscribe_devx_event(sh->txpp.echan,
864 sh->txpp.rearm_queue.cq_obj.cq->obj,
865 sizeof(event_nums), event_nums, 0);
867 DRV_LOG(ERR, "Failed to subscribe CQE event.");
871 /* Enable interrupts in the CQ. */
872 mlx5_txpp_cq_arm(sh);
873 /* Fire the first request on Rearm Queue. */
874 mlx5_txpp_doorbell_rearm_queue(sh, sh->txpp.rearm_queue.sq_size - 1);
875 mlx5_txpp_init_timestamp(sh);
880 * The routine initializes the packet pacing infrastructure:
881 * - allocates PP context
884 * - attaches rearm interrupt handler
885 * - starts Clock Queue
887 * Returns 0 on success, negative otherwise
890 mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
892 int tx_pp = priv->config.tx_pp;
895 /* Store the requested pacing parameters. */
896 sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
897 sh->txpp.test = !!(tx_pp < 0);
898 sh->txpp.skew = priv->config.tx_skew;
899 sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
900 ret = mlx5_txpp_create_event_channel(sh);
903 ret = mlx5_txpp_alloc_pp_index(sh);
906 ret = mlx5_txpp_create_clock_queue(sh);
909 ret = mlx5_txpp_create_rearm_queue(sh);
912 ret = mlx5_txpp_start_service(sh);
917 mlx5_txpp_stop_service(sh);
918 mlx5_txpp_destroy_rearm_queue(sh);
919 mlx5_txpp_destroy_clock_queue(sh);
920 mlx5_txpp_free_pp_index(sh);
921 mlx5_txpp_destroy_event_channel(sh);
930 * The routine destroys the packet pacing infrastructure:
931 * - detaches rearm interrupt handler
937 mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
939 mlx5_txpp_stop_service(sh);
940 mlx5_txpp_destroy_rearm_queue(sh);
941 mlx5_txpp_destroy_clock_queue(sh);
942 mlx5_txpp_free_pp_index(sh);
943 mlx5_txpp_destroy_event_channel(sh);
950 * Creates and starts packet pacing infrastructure on specified device.
953 * Pointer to Ethernet device structure.
956 * 0 on success, a negative errno value otherwise and rte_errno is set.
959 mlx5_txpp_start(struct rte_eth_dev *dev)
961 struct mlx5_priv *priv = dev->data->dev_private;
962 struct mlx5_dev_ctx_shared *sh = priv->sh;
966 if (!priv->config.tx_pp) {
967 /* Packet pacing is not requested for the device. */
968 MLX5_ASSERT(priv->txpp_en == 0);
972 /* Packet pacing is already enabled for the device. */
973 MLX5_ASSERT(sh->txpp.refcnt);
976 if (priv->config.tx_pp > 0) {
977 ret = rte_mbuf_dynflag_lookup
978 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
982 ret = pthread_mutex_lock(&sh->txpp.mutex);
985 if (sh->txpp.refcnt) {
989 err = mlx5_txpp_create(sh, priv);
991 MLX5_ASSERT(sh->txpp.tick);
998 ret = pthread_mutex_unlock(&sh->txpp.mutex);
1005 * Stops and destroys packet pacing infrastructure on specified device.
1008 * Pointer to Ethernet device structure.
1011 * 0 on success, a negative errno value otherwise and rte_errno is set.
1014 mlx5_txpp_stop(struct rte_eth_dev *dev)
1016 struct mlx5_priv *priv = dev->data->dev_private;
1017 struct mlx5_dev_ctx_shared *sh = priv->sh;
1020 if (!priv->txpp_en) {
1021 /* Packet pacing is already disabled for the device. */
1025 ret = pthread_mutex_lock(&sh->txpp.mutex);
1028 MLX5_ASSERT(sh->txpp.refcnt);
1029 if (!sh->txpp.refcnt || --sh->txpp.refcnt)
1031 /* No more references, perform the actual destroy. */
1032 mlx5_txpp_destroy(sh);
1033 ret = pthread_mutex_unlock(&sh->txpp.mutex);
1039 * Read the current clock counter of an Ethernet device
1041 * This returns the current raw clock value of an Ethernet device. It is
1042 * a raw number of ticks, with no given time reference.
1043 * The value returned here comes from the same clock as the one that
1044 * fills the timestamp field of Rx/Tx packets when the hardware timestamp
1045 * offload is used. Therefore it can be used to compute a precise
1046 * conversion of the device clock to real time.
1049 * Pointer to Ethernet device structure.
1051 * Pointer to the uint64_t that holds the raw clock value.
1055 * - -ENOTSUP: The function is not supported in this mode. Requires the
1056 * packet pacing module to be configured and started (tx_pp devarg)
1059 mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp)
1061 struct mlx5_priv *priv = dev->data->dev_private;
1062 struct mlx5_dev_ctx_shared *sh = priv->sh;
1065 if (sh->txpp.refcnt) {
1066 struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
1067 struct mlx5_cqe *cqe =
1068 (struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
1071 struct mlx5_cqe_ts cts;
1075 mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
1076 if (to.cts.op_own >> 4) {
1077 DRV_LOG(DEBUG, "Clock Queue error sync lost.");
1078 __atomic_fetch_add(&sh->txpp.err_clock_queue,
1079 1, __ATOMIC_RELAXED);
1080 sh->txpp.sync_lost = 1;
1083 ts = rte_be_to_cpu_64(to.cts.timestamp);
1084 ts = mlx5_txpp_convert_rx_ts(sh, ts);
1088 /* Not supported in isolated mode - kernel does not see the CQEs. */
1089 if (priv->isolated || rte_eal_process_type() != RTE_PROC_PRIMARY)
1091 ret = mlx5_read_clock(dev, timestamp);
1096 * DPDK callback to clear device extended statistics.
1099 * Pointer to Ethernet device structure.
1102 * 0 on success and stats are reset, negative errno value otherwise and
1105 int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
1107 struct mlx5_priv *priv = dev->data->dev_private;
1108 struct mlx5_dev_ctx_shared *sh = priv->sh;
1110 __atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
1111 __atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
1112 __atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
1113 __atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
1114 __atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
1119 * Routine to retrieve names of extended device statistics
1120 * for packet send scheduling. It appends the specific stats names
1121 * after the parts filled by preceding modules (eth stats, etc.)
1124 * Pointer to Ethernet device structure.
1125 * @param[out] xstats_names
1126 * Buffer to insert names into.
1130 * Number of names filled by preceding statistics modules.
1133 * Number of xstats names.
1135 int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1136 struct rte_eth_xstat_name *xstats_names,
1137 unsigned int n, unsigned int n_used)
1139 unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
1142 if (n >= n_used + n_txpp && xstats_names) {
1143 for (i = 0; i < n_txpp; ++i) {
1144 strncpy(xstats_names[i + n_used].name,
1145 mlx5_txpp_stat_names[i],
1146 RTE_ETH_XSTATS_NAME_SIZE);
1147 xstats_names[i + n_used].name
1148 [RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
1151 return n_used + n_txpp;
1155 mlx5_txpp_read_tsa(struct mlx5_dev_txpp *txpp,
1156 struct mlx5_txpp_ts *tsa, uint16_t idx)
1161 ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
1162 ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
1163 rte_compiler_barrier();
1164 if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
1166 if (__atomic_load_n(&txpp->tsa[idx].ts,
1167 __ATOMIC_RELAXED) != ts)
1169 if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
1170 __ATOMIC_RELAXED) != ci)
1179 * Jitter reflects the clock change between
1180 * neighbouring Clock Queue completions.
1183 mlx5_txpp_xstats_jitter(struct mlx5_dev_txpp *txpp)
1185 struct mlx5_txpp_ts tsa0, tsa1;
1189 if (txpp->ts_n < 2) {
1190 /* Not enough reports gathered yet. */
1197 rte_compiler_barrier();
1200 ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
1203 ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
1204 mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
1205 mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
1206 rte_compiler_barrier();
1207 } while (ts_p != txpp->ts_p);
1208 /* We have two neighbor reports, calculate the jitter. */
1209 dts = tsa1.ts - tsa0.ts;
1210 dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
1211 (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
1213 dci += 1 << MLX5_CQ_INDEX_WIDTH;
1215 return (dts > dci) ? dts - dci : dci - dts;
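/*
 * Worked example (illustrative, with dci converted to the same time units
 * as dts): if two neighbouring reports are one completion apart and the
 * Clock Queue tick is 1 usec, the expected delta is 1000 ns; a measured
 * timestamp delta of 1003 ns yields a reported jitter of 3 ns.
 */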
1219 * Wander reflects the long-term clock change
1220 * over the entire length of all Clock Queue completions.
1223 mlx5_txpp_xstats_wander(struct mlx5_dev_txpp *txpp)
1225 struct mlx5_txpp_ts tsa0, tsa1;
1229 if (txpp->ts_n < MLX5_TXPP_REARM_SQ_SIZE) {
1230 /* Not enough reports gathered yet. */
1237 rte_compiler_barrier();
1238 ts_0 = ts_p - MLX5_TXPP_REARM_SQ_SIZE / 2 - 1;
1240 ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
1243 ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
1244 mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
1245 mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
1246 rte_compiler_barrier();
1247 } while (ts_p != txpp->ts_p);
1248 /* We have the two distant reports, calculate the wander. */
1249 dts = tsa1.ts - tsa0.ts;
1250 dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
1251 (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
1252 dci += 1 << MLX5_CQ_INDEX_WIDTH;
1254 return (dts > dci) ? dts - dci : dci - dts;
1258 * Routine to retrieve extended device statistics
1259 * for packet send scheduling. It appends the specific statistics
1260 * after the parts filled by preceding modules (eth stats, etc.)
1263 * Pointer to Ethernet device.
1265 * Pointer to rte extended stats table.
1267 * The size of the stats table.
1269 * Number of stats filled by preceding statistics modules.
1272 * Number of extended stats on success and stats is filled,
1273 * negative on error and rte_errno is set.
1276 mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
1277 struct rte_eth_xstat *stats,
1278 unsigned int n, unsigned int n_used)
1280 unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
1282 if (n >= n_used + n_txpp && stats) {
1283 struct mlx5_priv *priv = dev->data->dev_private;
1284 struct mlx5_dev_ctx_shared *sh = priv->sh;
1287 for (i = 0; i < n_txpp; ++i)
1288 stats[n_used + i].id = n_used + i;
1289 stats[n_used + 0].value =
1290 __atomic_load_n(&sh->txpp.err_miss_int,
1292 stats[n_used + 1].value =
1293 __atomic_load_n(&sh->txpp.err_rearm_queue,
1295 stats[n_used + 2].value =
1296 __atomic_load_n(&sh->txpp.err_clock_queue,
1298 stats[n_used + 3].value =
1299 __atomic_load_n(&sh->txpp.err_ts_past,
1301 stats[n_used + 4].value =
1302 __atomic_load_n(&sh->txpp.err_ts_future,
1304 stats[n_used + 5].value = mlx5_txpp_xstats_jitter(&sh->txpp);
1305 stats[n_used + 6].value = mlx5_txpp_xstats_wander(&sh->txpp);
1306 stats[n_used + 7].value = sh->txpp.sync_lost;
1308 return n_used + n_txpp;
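/*
 * Application-side usage sketch (illustrative, not part of the driver),
 * assuming a started port with the tx_pp devarg enabled; it relies only
 * on the generic ethdev xstats API:
 *
 *	int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
 *	struct rte_eth_xstat *vals = calloc(nb, sizeof(*vals));
 *
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	rte_eth_xstats_get(port_id, vals, nb);
 *	for (int i = 0; i < nb; i++)
 *		if (strncmp(names[i].name, "tx_pp_", 6) == 0)
 *			printf("%s: %" PRIu64 "\n",
 *			       names[i].name, vals[i].value);
 */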