/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_eal_paging.h>

#include <mlx5_malloc.h>
#include <mlx5_common_devx.h>

#include "mlx5_common_os.h"

static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
	      "Wrong timestamp CQE part size");

static const char * const mlx5_txpp_stat_names[] = {
	"tx_pp_missed_interrupt_errors", /* Missed service interrupt. */
	"tx_pp_rearm_queue_errors", /* Rearm Queue errors. */
	"tx_pp_clock_queue_errors", /* Clock Queue errors. */
	"tx_pp_timestamp_past_errors", /* Timestamp in the past. */
	"tx_pp_timestamp_future_errors", /* Timestamp in the distant future. */
	"tx_pp_jitter", /* Timestamp jitter (one Clock Queue completion). */
	"tx_pp_wander", /* Timestamp wander (half of Clock Queue CQEs). */
	"tx_pp_sync_lost", /* Scheduling synchronization lost. */
};

/* Destroy Event Queue Notification Channel. */
static void
mlx5_txpp_destroy_event_channel(struct mlx5_dev_ctx_shared *sh)
{
	if (sh->txpp.echan) {
		mlx5_os_devx_destroy_event_channel(sh->txpp.echan);
		sh->txpp.echan = NULL;
	}
}

/* Create Event Queue Notification Channel. */
static int
mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
{
	MLX5_ASSERT(!sh->txpp.echan);
	sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->cdev->ctx,
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!sh->txpp.echan) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.", rte_errno);
		return -rte_errno;
	}
	return 0;
}

/* Free Packet Pacing index allocated from the kernel. */
static void
mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
{
#ifdef HAVE_MLX5DV_PP_ALLOC
	if (sh->txpp.pp)
		mlx5_glue->dv_free_pp(sh->txpp.pp);
	sh->txpp.pp = NULL;
#else
	RTE_SET_USED(sh);
	DRV_LOG(ERR, "Freeing pacing index is not supported.");
#endif
}

/* Allocate Packet Pacing index from kernel via mlx5dv call. */
static int
mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
{
#ifdef HAVE_MLX5DV_PP_ALLOC
	uint32_t pp[MLX5_ST_SZ_DW(set_pp_rate_limit_context)];
	uint64_t rate;
	size_t len;

	MLX5_ASSERT(!sh->txpp.pp);
	memset(&pp, 0, sizeof(pp));
	rate = NS_PER_S / sh->txpp.tick;
	if (rate * sh->txpp.tick != NS_PER_S)
		DRV_LOG(WARNING, "Packet pacing frequency is not precise.");
	if (sh->txpp.test) {
		len = RTE_MAX(MLX5_TXPP_TEST_PKT_SIZE,
			      (size_t)RTE_ETHER_MIN_LEN);
		MLX5_SET(set_pp_rate_limit_context, &pp,
			 burst_upper_bound, len);
		MLX5_SET(set_pp_rate_limit_context, &pp,
			 typical_packet_size, len);
		/* Convert packets per second into kilobits. */
		rate = (rate * len) / (1000ul / CHAR_BIT);
		DRV_LOG(INFO, "Packet pacing rate set to %" PRIu64, rate);
	}
	MLX5_SET(set_pp_rate_limit_context, &pp, rate_limit, rate);
	MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
		 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
	sh->txpp.pp = mlx5_glue->dv_alloc_pp
				(sh->cdev->ctx, sizeof(pp), &pp,
				 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
	if (sh->txpp.pp == NULL) {
		DRV_LOG(ERR, "Failed to allocate packet pacing index.");
		rte_errno = errno;
		return -rte_errno;
	}
	if (!((struct mlx5dv_pp *)sh->txpp.pp)->index) {
		DRV_LOG(ERR, "Zero packet pacing index allocated.");
		mlx5_txpp_free_pp_index(sh);
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	sh->txpp.pp_id = ((struct mlx5dv_pp *)(sh->txpp.pp))->index;
	return 0;
#else
	RTE_SET_USED(sh);
	DRV_LOG(ERR, "Allocating pacing index is not supported.");
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}
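
/*
 * Worked example of the conversion above (illustrative figures only,
 * assuming a 1200ns tick and a 64B test packet length):
 *   rate = NS_PER_S / tick         = 10^9 / 1200     = 833333 pkt/s
 *   rate = rate * len / (1000 / 8) = 833333 * 64/125 = 426666 kbit/s
 * i.e. packets per second scaled by the packet length and converted
 * from bytes to kilobits, matching the MLX5_DATA_RATE mode.
 */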

static void
mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
{
	mlx5_devx_sq_destroy(&wq->sq_obj);
	mlx5_devx_cq_destroy(&wq->cq_obj);
	memset(wq, 0, sizeof(*wq));
}

static void
mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;

	mlx5_txpp_destroy_send_queue(wq);
}

static void
mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;

	mlx5_txpp_destroy_send_queue(wq);
	mlx5_free(sh->txpp.tsa);
	sh->txpp.tsa = NULL;
}

static inline void
mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
	union {
		uint32_t w32[2];
		uint64_t w64;
	} cs;
	void *reg_addr;

	wq->sq_ci = ci + 1;
	cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
			(wqe[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);
	cs.w32[1] = wqe[ci & (wq->sq_size - 1)].ctrl[1];
	/* Update SQ doorbell record with new SQ ci. */
	rte_compiler_barrier();
	*wq->sq_obj.db_rec = rte_cpu_to_be_32(wq->sq_ci);
	/* Make sure the doorbell record is updated. */
	rte_wmb();
	/* Write to doorbell register to start processing. */
	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
	__mlx5_uar_write64_relaxed(cs.w64, reg_addr, NULL);
	rte_wmb();
}
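
/*
 * Doorbell sequence sketch (summary of the function above): the first
 * two control dwords of the last posted WQE are combined into one
 * 64-bit value with the new producer index folded in, the memory
 * doorbell record is updated with sq_ci, and then the 64-bit value is
 * written to the UAR register to kick the hardware. The barriers keep
 * the record update ordered before the register write, so the device
 * never observes a stale producer index.
 */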

static void
mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
	uint32_t i;

	for (i = 0; i < wq->sq_size; i += 2) {
		struct mlx5_wqe_cseg *cs;
		struct mlx5_wqe_qseg *qs;
		uint32_t index;

		/* Build SEND_EN request with slave WQE index. */
		cs = &wqe[i + 0].cseg;
		cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
		cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) | 2);
		cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
			((1 << MLX5_WQ_INDEX_WIDTH) - 1);
		qs->max_index = rte_cpu_to_be_32(index);
		qs->qpn_cqn =
			rte_cpu_to_be_32(sh->txpp.clock_queue.sq_obj.sq->id);
		/* Build WAIT request with slave CQE index. */
		cs = &wqe[i + 1].cseg;
		cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
		cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) | 2);
		cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
			((1 << MLX5_CQ_INDEX_WIDTH) - 1);
		qs->max_index = rte_cpu_to_be_32(index);
		qs->qpn_cqn =
			rte_cpu_to_be_32(sh->txpp.clock_queue.cq_obj.cq->id);
	}
}
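
/*
 * Worked example of the wait/send indices above, assuming
 * MLX5_TXPP_REARM == 32 (an illustrative value only):
 *   i == 0: SEND_EN waits for Clock SQ WQE index 32,
 *           WAIT fires on Clock CQ CQE index 16;
 *   i == 2: the indices are 64 and 48 respectively;
 * both wrap at the 1 << MLX5_WQ_INDEX_WIDTH (respectively
 * 1 << MLX5_CQ_INDEX_WIDTH) boundary.
 */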

/* Creates the Rearm Queue to fire the requests to Clock Queue in realtime. */
static int
mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_create_sq_attr sq_attr = {
		.state = MLX5_SQC_STATE_RST,
		.tis_lst_sz = 1,
		.tis_num = sh->tis->id,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = sh->cdev->pdn,
			.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
		},
		.ts_format = mlx5_ts_format_conv
				(sh->cdev->config.hca_attr.sq_ts_format),
	};
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
	};
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	int ret;

	/* Create completion queue object for Rearm Queue. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
				  log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,
				  sh->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
		return ret;
	}
	wq->cq_ci = 0;
	wq->arm_sn = 0;
	wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
	/* Create send queue object for Rearm Queue. */
	sq_attr.cqn = wq->cq_obj.cq->id;
	/* There should be no WQE leftovers in the cyclic queue. */
	ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
				  log2above(MLX5_TXPP_REARM_SQ_SIZE), &sq_attr,
				  sh->numa_node);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
		goto error;
	}
	/* Build the WQEs in the Send Queue before going to Ready state. */
	mlx5_txpp_fill_wqe_rearm_queue(sh);
	/* Change queue state to ready. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(wq->sq_obj.sq, &msq_attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to set SQ ready state Rearm Queue.");
		goto error;
	}
	return 0;
error:
	ret = -rte_errno;
	mlx5_txpp_destroy_rearm_queue(sh);
	rte_errno = -ret;
	return ret;
}

static void
mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->sq_obj.wqes;
	struct mlx5_wqe_cseg *cs = &wqe->cseg;
	uint32_t wqe_size, opcode, i;
	uint8_t *dst;

	/* For test purposes fill the WQ with SEND inline packet. */
	if (sh->txpp.test) {
		wqe_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
				     MLX5_WQE_CSEG_SIZE +
				     2 * MLX5_WQE_ESEG_SIZE -
				     MLX5_ESEG_MIN_INLINE_SIZE,
				     MLX5_WSEG_SIZE);
		opcode = MLX5_OPCODE_SEND;
	} else {
		wqe_size = MLX5_WSEG_SIZE;
		opcode = MLX5_OPCODE_NOP;
	}
	cs->opcode = rte_cpu_to_be_32(opcode | 0); /* Index is ignored. */
	cs->sq_ds = rte_cpu_to_be_32((wq->sq_obj.sq->id << 8) |
				     (wqe_size / MLX5_WSEG_SIZE));
	cs->flags = RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
	cs->misc = RTE_BE32(0);
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);
	if (sh->txpp.test) {
		struct mlx5_wqe_eseg *es = &wqe->eseg;
		struct rte_ether_hdr *eth_hdr;
		struct rte_ipv4_hdr *ip_hdr;
		struct rte_udp_hdr *udp_hdr;

		/* Build the inline test packet pattern. */
		MLX5_ASSERT(wqe_size <= MLX5_WQE_SIZE_MAX);
		MLX5_ASSERT(MLX5_TXPP_TEST_PKT_SIZE >=
			    (sizeof(struct rte_ether_hdr) +
			     sizeof(struct rte_ipv4_hdr)));
		es->flags = 0;
		es->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		es->swp_offs = 0;
		es->metadata = 0;
		es->swp_flags = 0;
		es->mss = 0;
		es->inline_hdr_sz = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE);
		/* Build test packet L2 header (Ethernet). */
		dst = (uint8_t *)&es->inline_data;
		eth_hdr = (struct rte_ether_hdr *)dst;
		rte_eth_random_addr(&eth_hdr->dst_addr.addr_bytes[0]);
		rte_eth_random_addr(&eth_hdr->src_addr.addr_bytes[0]);
		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		/* Build test packet L3 header (IP v4). */
		dst += sizeof(struct rte_ether_hdr);
		ip_hdr = (struct rte_ipv4_hdr *)dst;
		ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
		ip_hdr->type_of_service = 0;
		ip_hdr->fragment_offset = 0;
		ip_hdr->time_to_live = 64;
		ip_hdr->next_proto_id = IPPROTO_UDP;
		ip_hdr->packet_id = 0;
		ip_hdr->total_length = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
						sizeof(struct rte_ether_hdr));
		/* Use RFC5735 / RFC2544 reserved network test addresses. */
		ip_hdr->src_addr = RTE_BE32((198U << 24) | (18 << 16) |
					    (0 << 8) | 1);
		ip_hdr->dst_addr = RTE_BE32((198U << 24) | (18 << 16) |
					    (0 << 8) | 2);
		if (MLX5_TXPP_TEST_PKT_SIZE <
		    (sizeof(struct rte_ether_hdr) +
		     sizeof(struct rte_ipv4_hdr) +
		     sizeof(struct rte_udp_hdr)))
			goto wcopy;
		/* Build test packet L4 header (UDP). */
		dst += sizeof(struct rte_ipv4_hdr);
		udp_hdr = (struct rte_udp_hdr *)dst;
		udp_hdr->src_port = RTE_BE16(9); /* RFC863 Discard. */
		udp_hdr->dst_port = RTE_BE16(9);
		udp_hdr->dgram_len = RTE_BE16(MLX5_TXPP_TEST_PKT_SIZE -
					      sizeof(struct rte_ether_hdr) -
					      sizeof(struct rte_ipv4_hdr));
		udp_hdr->dgram_cksum = 0;
		/* Fill the test packet data. */
		dst += sizeof(struct rte_udp_hdr);
		for (i = sizeof(struct rte_ether_hdr) +
			 sizeof(struct rte_ipv4_hdr) +
			 sizeof(struct rte_udp_hdr);
		     i < MLX5_TXPP_TEST_PKT_SIZE; i++)
			*dst++ = (uint8_t)(i & 0xFF);
	}
wcopy:
	/* Duplicate the pattern to the next WQEs. */
	dst = (uint8_t *)(uintptr_t)wq->sq_obj.umem_buf;
	for (i = 1; i < MLX5_TXPP_CLKQ_SIZE; i++) {
		dst += wqe_size;
		rte_memcpy(dst, (void *)(uintptr_t)wq->sq_obj.umem_buf,
			   wqe_size);
	}
}
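
/*
 * Resulting WQE layout in test mode (sketch; the header sizes are the
 * standard 14/20/8 bytes):
 *   cseg | eseg (csum flags, inline_hdr_sz) | Ethernet | IPv4 | UDP |
 *   payload bytes 0x2A, 0x2B, ... (the i & 0xFF pattern, starting at
 *   packet offset 42)
 * padded up to a multiple of MLX5_WQE_SIZE and replicated over the
 * remaining MLX5_TXPP_CLKQ_SIZE - 1 WQEs of the Send Queue buffer.
 */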

/* Creates the Clock Queue for packet pacing, returns zero on success. */
static int
mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = {
		.use_first_only = 1,
		.overrun_ignore = 1,
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
	};
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	int ret;

	sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				   MLX5_TXPP_REARM_SQ_SIZE *
				   sizeof(struct mlx5_txpp_ts),
				   0, sh->numa_node);
	if (!sh->txpp.tsa) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
		return -ENOMEM;
	}
	sh->txpp.ts_p = 0;
	sh->txpp.ts_n = 0;
	/* Create completion queue object for Clock Queue. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
				  log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,
				  sh->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ for Clock Queue.");
		goto error;
	}
	wq->cq_ci = 0;
	/* Allocate memory buffer for Send Queue WQEs. */
	if (sh->txpp.test) {
		wq->sq_size = RTE_ALIGN(MLX5_TXPP_TEST_PKT_SIZE +
					MLX5_WQE_CSEG_SIZE +
					2 * MLX5_WQE_ESEG_SIZE -
					MLX5_ESEG_MIN_INLINE_SIZE,
					MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
		wq->sq_size *= MLX5_TXPP_CLKQ_SIZE;
	} else {
		wq->sq_size = MLX5_TXPP_CLKQ_SIZE;
	}
	/* There should not be WQE leftovers in the cyclic queue. */
	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
	/* Create send queue object for Clock Queue. */
	if (sh->txpp.test) {
		sq_attr.tis_lst_sz = 1;
		sq_attr.tis_num = sh->tis->id;
		sq_attr.non_wire = 0;
		sq_attr.static_sq_wq = 1;
	} else {
		sq_attr.non_wire = 1;
		sq_attr.static_sq_wq = 1;
	}
	sq_attr.cqn = wq->cq_obj.cq->id;
	sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
	sq_attr.wq_attr.cd_slave = 1;
	sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
	sq_attr.wq_attr.pd = sh->cdev->pdn;
	sq_attr.ts_format =
		mlx5_ts_format_conv(sh->cdev->config.hca_attr.sq_ts_format);
	ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
				  log2above(wq->sq_size),
				  &sq_attr, sh->numa_node);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create SQ for Clock Queue.");
		goto error;
	}
	/* Build the WQEs in the Send Queue before going to Ready state. */
	mlx5_txpp_fill_wqe_clock_queue(sh);
	/* Change queue state to ready. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	wq->sq_ci = 0;
	ret = mlx5_devx_cmd_modify_sq(wq->sq_obj.sq, &msq_attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to set SQ ready state Clock Queue.");
		goto error;
	}
	return 0;
error:
	ret = -rte_errno;
	mlx5_txpp_destroy_clock_queue(sh);
	rte_errno = -ret;
	return ret;
}

/* Enable notification from the Rearm Queue CQ. */
static void
mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
{
	void *base_addr;
	struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
	uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
	uint64_t db_be =
		rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq_obj.cq->id);
	base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
	uint32_t *addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);

	rte_compiler_barrier();
	aq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
	rte_wmb();
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = db_be;
#else
	*(uint32_t *)addr = db_be;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = db_be >> 32;
#endif
	aq->arm_sn++;
}
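
/*
 * Sketch of the arming word composed above: db_hi packs the arm
 * sequence number, the MLX5_CQ_DBR_CMD_ALL command and the current
 * consumer index, and db_be pairs it with the CQ number. The value is
 * stored in the doorbell record first and then written to the UAR,
 * either as one 64-bit store or, on 32-bit targets, as two ordered
 * 32-bit stores.
 */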

#if defined(RTE_ARCH_X86_64)
static __rte_always_inline int
mlx5_atomic128_compare_exchange(rte_int128_t *dst,
				rte_int128_t *exp,
				const rte_int128_t *src)
{
	uint8_t res;

	asm volatile (MPLOCKED
		      "cmpxchg16b %[dst];"
		      " sete %[res]"
		      : [dst] "=m" (dst->val[0]),
			"=a" (exp->val[0]), "=d" (exp->val[1]),
			[res] "=r" (res)
		      : "b" (src->val[0]), "c" (src->val[1]),
			"a" (exp->val[0]), "d" (exp->val[1]),
			"m" (dst->val[0])
		      : "memory");
	return res;
}
#endif

static inline void
mlx5_atomic_read_cqe(rte_int128_t *from, rte_int128_t *ts)
{
	/*
	 * The only CQE of Clock Queue is being continuously
	 * updated by hardware with the specified rate. We must
	 * read timestamp and WQE completion index atomically.
	 */
#if defined(RTE_ARCH_X86_64)
	rte_int128_t src;

	memset(&src, 0, sizeof(src));
	*ts = src;
	/* if (*from == *ts) *from = *src else *ts = *from; */
	mlx5_atomic128_compare_exchange(from, ts, &src);
#else
	uint64_t *cqe = (uint64_t *)from;
	uint64_t *ps = (uint64_t *)ts;
	uint64_t tm, op;

	/*
	 * Power architecture does not support 16B compare-and-swap.
	 * ARM implements it in software, so the re-read loop below
	 * is the more relevant approach.
	 */
	for (;;) {
		rte_compiler_barrier();
		tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
		op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
		rte_compiler_barrier();
		if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
			continue;
		if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
			continue;
		break;
	}
	ps[0] = tm;
	ps[1] = op;
#endif
}
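
/*
 * Example of the hazard handled above (illustrative): the Clock Queue
 * owns a single CQE which hardware rewrites every tick, so two plain
 * 8-byte loads could pair a timestamp from one update with a counter
 * from the next. x86_64 snapshots the 16B CQE with cmpxchg16b; other
 * architectures re-read both halves until two consecutive reads agree.
 */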

/* Stores timestamp in the cache structure to share data with datapath. */
static inline void
mlx5_txpp_cache_timestamp(struct mlx5_dev_ctx_shared *sh,
			  uint64_t ts, uint64_t ci)
{
	ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
	ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
	rte_compiler_barrier();
	__atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
	rte_wmb();
}
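
/*
 * Worked example of the packing above, assuming MLX5_CQ_INDEX_WIDTH
 * is 24 (an assumption for illustration): ci_ts keeps the completion
 * index in the top 24 bits and the low 40 bits of the timestamp in
 * the rest:
 *   ci_ts = (ci << 40) | (ts & ((1ull << 40) - 1))
 * so the datapath can fetch both with a single 64-bit load.
 */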

/* Reads timestamp from Clock Queue CQE and stores in the cache. */
static inline void
mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
	union {
		rte_int128_t u128;
		struct mlx5_cqe_ts cts;
	} to;
	uint64_t ts;
	uint16_t ci;
	uint8_t opcode;

	mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
	opcode = MLX5_CQE_OPCODE(to.cts.op_own);
	if (opcode) {
		if (opcode != MLX5_CQE_INVALID) {
			/*
			 * Commit the error state if and only if
			 * we have got at least one actual completion.
			 */
			DRV_LOG(DEBUG,
				"Clock Queue error sync lost (%X).", opcode);
			__atomic_fetch_add(&sh->txpp.err_clock_queue,
					   1, __ATOMIC_RELAXED);
			sh->txpp.sync_lost = 1;
		}
		return;
	}
	ci = rte_be_to_cpu_16(to.cts.wqe_counter);
	ts = rte_be_to_cpu_64(to.cts.timestamp);
	ts = mlx5_txpp_convert_rx_ts(sh, ts);
	wq->cq_ci += (ci - wq->sq_ci) & UINT16_MAX;
	wq->sq_ci = ci;
	mlx5_txpp_cache_timestamp(sh, ts, wq->cq_ci);
}

/* Waits for the first completion on Clock Queue to init timestamp. */
static inline void
mlx5_txpp_init_timestamp(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
	uint32_t wait;

	sh->txpp.ts_p = 0;
	sh->txpp.ts_n = 0;
	for (wait = 0; wait < MLX5_TXPP_WAIT_INIT_TS; wait++) {
		mlx5_txpp_update_timestamp(sh);
		if (wq->sq_ci)
			return;
		/* Wait one millisecond and try again. */
		rte_delay_us_sleep(US_PER_S / MS_PER_S);
	}
	DRV_LOG(ERR, "Unable to initialize timestamp.");
	sh->txpp.sync_lost = 1;
}

#ifdef HAVE_IBV_DEVX_EVENT
/* Gather statistics for timestamp from Clock Queue CQE. */
static inline void
mlx5_txpp_gather_timestamp(struct mlx5_dev_ctx_shared *sh)
{
	/* Check whether we have a valid timestamp. */
	if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
		return;
	MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
			 sh->txpp.ts.ts, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
			 sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
	if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
		sh->txpp.ts_p = 0;
	if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
		++sh->txpp.ts_n;
}
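
/*
 * tsa[] is a ring of the last MLX5_TXPP_REARM_SQ_SIZE cached
 * timestamps: ts_p is the write cursor and ts_n the count of valid
 * entries. The jitter/wander xstats below pick two entries out of
 * this ring and validate them against concurrent updates with
 * mlx5_txpp_read_tsa().
 */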

/* Handles Rearm Queue completions in periodic service. */
static __rte_always_inline void
mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
	uint32_t cq_ci = wq->cq_ci;
	bool error = false;
	int ret;

	do {
		volatile struct mlx5_cqe *cqe;

		cqe = &wq->cq_obj.cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];
		ret = check_cqe(cqe, MLX5_TXPP_REARM_CQ_SIZE, cq_ci);
		switch (ret) {
		case MLX5_CQE_STATUS_ERR:
			error = true;
			++cq_ci;
			break;
		case MLX5_CQE_STATUS_SW_OWN:
			wq->sq_ci += 2;
			++cq_ci;
			break;
		case MLX5_CQE_STATUS_HW_OWN:
			break;
		default:
			MLX5_ASSERT(false);
			break;
		}
	} while (ret != MLX5_CQE_STATUS_HW_OWN);
	if (likely(cq_ci != wq->cq_ci)) {
		/* Check whether we have missed interrupts. */
		if (cq_ci - wq->cq_ci != 1) {
			DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
			__atomic_fetch_add(&sh->txpp.err_miss_int,
					   1, __ATOMIC_RELAXED);
			/* Check sync lost on wqe index. */
			if (cq_ci - wq->cq_ci >=
			    (((1UL << MLX5_WQ_INDEX_WIDTH) /
			      MLX5_TXPP_REARM) - 1))
				error = true;
		}
		/* Update doorbell record to notify hardware. */
		rte_compiler_barrier();
		*wq->cq_obj.db_rec = rte_cpu_to_be_32(cq_ci);
		rte_wmb();
		wq->cq_ci = cq_ci;
		/* Fire new requests to Rearm Queue. */
		if (error) {
			DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
			__atomic_fetch_add(&sh->txpp.err_rearm_queue,
					   1, __ATOMIC_RELAXED);
			sh->txpp.sync_lost = 1;
		}
	}
}

/* Handles Clock Queue completions in periodic service. */
static __rte_always_inline void
mlx5_txpp_handle_clock_queue(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_txpp_update_timestamp(sh);
	mlx5_txpp_gather_timestamp(sh);
}
#endif

/* Invoked periodically on Rearm Queue completions. */
static void
mlx5_txpp_interrupt_handler(void *cb_arg)
{
#ifndef HAVE_IBV_DEVX_EVENT
	RTE_SET_USED(cb_arg);
#else
	struct mlx5_dev_ctx_shared *sh = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Process events in the loop. Only rearm completions are expected. */
	while (mlx5_glue->devx_get_event
			(sh->txpp.echan, &out.event_resp, sizeof(out.buf)) >=
			(ssize_t)sizeof(out.event_resp.cookie)) {
		mlx5_txpp_handle_rearm_queue(sh);
		mlx5_txpp_handle_clock_queue(sh);
		mlx5_txpp_cq_arm(sh);
		mlx5_txpp_doorbell_rearm_queue
				(sh, sh->txpp.rearm_queue.sq_ci - 1);
	}
#endif /* HAVE_IBV_DEVX_EVENT */
}

static void
mlx5_txpp_stop_service(struct mlx5_dev_ctx_shared *sh)
{
	if (!sh->txpp.intr_handle.fd)
		return;
	mlx5_intr_callback_unregister(&sh->txpp.intr_handle,
				      mlx5_txpp_interrupt_handler, sh);
	sh->txpp.intr_handle.fd = 0;
}

/* Attaches the interrupt handler and fires the first request to Rearm Queue. */
static int
mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
{
	uint16_t event_nums[1] = {0};
	int ret;
	int fd;

	sh->txpp.err_miss_int = 0;
	sh->txpp.err_rearm_queue = 0;
	sh->txpp.err_clock_queue = 0;
	sh->txpp.err_ts_past = 0;
	sh->txpp.err_ts_future = 0;
	/* Attach interrupt handler to process Rearm Queue completions. */
	fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
	ret = mlx5_os_set_nonblock_channel_fd(fd);
	if (ret) {
		DRV_LOG(ERR, "Failed to change event channel FD.");
		rte_errno = errno;
		return -rte_errno;
	}
	memset(&sh->txpp.intr_handle, 0, sizeof(sh->txpp.intr_handle));
	fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
	sh->txpp.intr_handle.fd = fd;
	sh->txpp.intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&sh->txpp.intr_handle,
				       mlx5_txpp_interrupt_handler, sh)) {
		sh->txpp.intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
		return -rte_errno;
	}
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_os_devx_subscribe_devx_event(sh->txpp.echan,
						sh->txpp.rearm_queue.cq_obj.cq->obj,
						sizeof(event_nums), event_nums, 0);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		rte_errno = errno;
		return -rte_errno;
	}
	/* Enable interrupts in the CQ. */
	mlx5_txpp_cq_arm(sh);
	/* Fire the first request on Rearm Queue. */
	mlx5_txpp_doorbell_rearm_queue(sh, sh->txpp.rearm_queue.sq_size - 1);
	mlx5_txpp_init_timestamp(sh);
	return 0;
}
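
/*
 * Startup sequence summary (as implemented above):
 *   1. clear the error counters;
 *   2. make the event channel FD non-blocking and register
 *      mlx5_txpp_interrupt_handler() on it;
 *   3. subscribe the Rearm Queue CQ to completion events;
 *   4. arm the CQ and ring the Rearm Queue doorbell;
 *   5. wait for the first Clock Queue completion to seed the
 *      timestamp cache.
 */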

/*
 * The routine initializes the packet pacing infrastructure:
 * - allocates PP context
 * - Clock CQ/SQ
 * - Rearm CQ/SQ
 * - attaches rearm interrupt handler
 * - starts Clock Queue
 *
 * Returns 0 on success, negative otherwise
 */
static int
mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
{
	int tx_pp = priv->config.tx_pp;
	int ret;

	/* Store the requested pacing parameters. */
	sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
	sh->txpp.test = !!(tx_pp < 0);
	sh->txpp.skew = priv->config.tx_skew;
	sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
	ret = mlx5_txpp_create_event_channel(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_alloc_pp_index(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_create_clock_queue(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_create_rearm_queue(sh);
	if (ret)
		goto exit;
	ret = mlx5_txpp_start_service(sh);
exit:
	if (ret) {
		mlx5_txpp_stop_service(sh);
		mlx5_txpp_destroy_rearm_queue(sh);
		mlx5_txpp_destroy_clock_queue(sh);
		mlx5_txpp_free_pp_index(sh);
		mlx5_txpp_destroy_event_channel(sh);
	}
	return ret;
}
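
/*
 * Configuration sketch (illustrative): pacing is requested with the
 * tx_pp devarg, e.g. "-a <pci>,tx_pp=500" selects a 500ns tick, and a
 * negative value such as tx_pp=-500 additionally enables the test
 * mode with inline SEND packets on the Clock Queue; this is exactly
 * the tick/test split stored above. tx_skew adds a constant
 * scheduling offset in nanoseconds.
 */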

/*
 * The routine destroys the packet pacing infrastructure:
 * - detaches rearm interrupt handler
 * - frees the created objects in reverse order
 */
static void
mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_txpp_stop_service(sh);
	mlx5_txpp_destroy_rearm_queue(sh);
	mlx5_txpp_destroy_clock_queue(sh);
	mlx5_txpp_free_pp_index(sh);
	mlx5_txpp_destroy_event_channel(sh);
}

/**
 * Creates and starts packet pacing infrastructure on specified device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txpp_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int err = 0;
	int ret;

	if (!priv->config.tx_pp) {
		/* Packet pacing is not requested for the device. */
		MLX5_ASSERT(priv->txpp_en == 0);
		return 0;
	}
	if (priv->txpp_en) {
		/* Packet pacing is already enabled for the device. */
		MLX5_ASSERT(sh->txpp.refcnt);
		return 0;
	}
	if (priv->config.tx_pp > 0) {
		ret = rte_mbuf_dynflag_lookup
				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
		if (ret < 0)
			return 0;
	}
	ret = pthread_mutex_lock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	if (sh->txpp.refcnt) {
		priv->txpp_en = 1;
		++sh->txpp.refcnt;
	} else {
		err = mlx5_txpp_create(sh, priv);
		if (!err) {
			MLX5_ASSERT(sh->txpp.tick);
			priv->txpp_en = 1;
			sh->txpp.refcnt = 1;
		} else {
			rte_errno = -err;
		}
	}
	ret = pthread_mutex_unlock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	return err;
}

/**
 * Stops and destroys packet pacing infrastructure on specified device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txpp_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int ret;

	if (!priv->txpp_en) {
		/* Packet pacing is already disabled for the device. */
		return 0;
	}
	priv->txpp_en = 0;
	ret = pthread_mutex_lock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	MLX5_ASSERT(sh->txpp.refcnt);
	if (!sh->txpp.refcnt || --sh->txpp.refcnt)
		goto exit;
	/* No more references, perform the actual destroy. */
	mlx5_txpp_destroy(sh);
exit:
	ret = pthread_mutex_unlock(&sh->txpp.mutex);
	MLX5_ASSERT(!ret);
	RTE_SET_USED(ret);
	return 0;
}

/**
 * Read the current clock counter of an Ethernet device.
 *
 * This returns the current raw clock value of an Ethernet device. It is
 * a raw amount of ticks, with no given time reference.
 * The value returned here is from the same clock as the one
 * filling timestamp field of Rx/Tx packets when using hardware timestamp
 * offload. Therefore it can be used to compute a precise conversion of
 * the device clock to real time.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param timestamp
 *   Pointer to the uint64_t that holds the raw clock value.
 *
 * @return
 *   - 0: Success.
 *   - -ENOTSUP: The function is not supported in this mode. Requires
 *     packet pacing module configured and started (tx_pp devarg)
 */
int
mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int ret;

	if (sh->txpp.refcnt) {
		struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
		struct mlx5_cqe *cqe =
			(struct mlx5_cqe *)(uintptr_t)wq->cq_obj.cqes;
		union {
			rte_int128_t u128;
			struct mlx5_cqe_ts cts;
		} to;
		uint64_t ts;

		mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
		if (to.cts.op_own >> 4) {
			DRV_LOG(DEBUG, "Clock Queue error sync lost.");
			__atomic_fetch_add(&sh->txpp.err_clock_queue,
					   1, __ATOMIC_RELAXED);
			sh->txpp.sync_lost = 1;
			return -EIO;
		}
		ts = rte_be_to_cpu_64(to.cts.timestamp);
		ts = mlx5_txpp_convert_rx_ts(sh, ts);
		*timestamp = ts;
		return 0;
	}
	/* Not supported in isolated mode - kernel does not see the CQEs. */
	if (priv->isolated || rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -ENOTSUP;
	ret = mlx5_read_clock(dev, timestamp);
	return ret;
}
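
/*
 * Usage sketch (application side): this callback backs
 * rte_eth_read_clock(), e.g.
 *
 *   uint64_t clk;
 *   if (rte_eth_read_clock(port_id, &clk) == 0)
 *       ... clk holds raw device ticks from the same clock that
 *           fills the Rx/Tx packet timestamps ...
 *
 * With packet pacing active the value is taken from the Clock Queue
 * CQE; otherwise it falls back to mlx5_read_clock() via the kernel.
 */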

/**
 * DPDK callback to clear device extended statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success and stats is reset, negative errno value otherwise and
 *   rte_errno is set.
 */
int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	__atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
	return 0;
}

/**
 * Routine to retrieve names of extended device statistics
 * for packet send scheduling. It appends the specific stats names
 * after the parts filled by preceding modules (eth stats, etc.)
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] xstats_names
 *   Buffer to insert names into.
 * @param n
 *   Number of names.
 * @param n_used
 *   Number of names filled by preceding statistics modules.
 *
 * @return
 *   Number of xstats names.
 */
int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			       struct rte_eth_xstat_name *xstats_names,
			       unsigned int n, unsigned int n_used)
{
	unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);
	unsigned int i;

	if (n >= n_used + n_txpp && xstats_names) {
		for (i = 0; i < n_txpp; ++i) {
			strncpy(xstats_names[i + n_used].name,
				mlx5_txpp_stat_names[i],
				RTE_ETH_XSTATS_NAME_SIZE);
			xstats_names[i + n_used].name
					[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
		}
	}
	return n_used + n_txpp;
}

static inline void
mlx5_txpp_read_tsa(struct mlx5_dev_txpp *txpp,
		   struct mlx5_txpp_ts *tsa, uint16_t idx)
{
	do {
		uint64_t ts, ci;

		ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
		ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
		rte_compiler_barrier();
		if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
			continue;
		if (__atomic_load_n(&txpp->tsa[idx].ts,
				    __ATOMIC_RELAXED) != ts)
			continue;
		if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
				    __ATOMIC_RELAXED) != ci)
			continue;
		tsa->ts = ts;
		tsa->ci_ts = ci;
		return;
	} while (true);
}

/*
 * Jitter reflects the clock change between
 * neighboring Clock Queue completions.
 */
static uint64_t
mlx5_txpp_xstats_jitter(struct mlx5_dev_txpp *txpp)
{
	struct mlx5_txpp_ts tsa0, tsa1;
	int64_t dts, dci;
	uint16_t ts_p;

	if (txpp->ts_n < 2) {
		/* Not enough reports gathered yet. */
		return 0;
	}
	do {
		int ts_0, ts_1;

		ts_p = txpp->ts_p;
		rte_compiler_barrier();
		ts_0 = ts_p - 2;
		if (ts_0 < 0)
			ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
		ts_1 = ts_p - 1;
		if (ts_1 < 0)
			ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
		mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
		mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
		rte_compiler_barrier();
	} while (ts_p != txpp->ts_p);
	/* We have two neighbor reports, calculate the jitter. */
	dts = tsa1.ts - tsa0.ts;
	dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
	      (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
	if (dci < 0)
		dci += 1 << MLX5_CQ_INDEX_WIDTH;
	dci *= txpp->tick;
	return (dts > dci) ? dts - dci : dci - dts;
}
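
/*
 * Worked example (illustrative figures): with a 1200ns tick, two
 * neighbor reports should be one completion apart, so the expected
 * delta is dci * tick = 1200ns. If the measured dts is 1250ns the
 * reported jitter is |1250 - 1200| = 50ns.
 */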

/*
 * Wander reflects the long-term clock change
 * over the entire length of all Clock Queue completions.
 */
static uint64_t
mlx5_txpp_xstats_wander(struct mlx5_dev_txpp *txpp)
{
	struct mlx5_txpp_ts tsa0, tsa1;
	int64_t dts, dci;
	uint16_t ts_p;

	if (txpp->ts_n < MLX5_TXPP_REARM_SQ_SIZE) {
		/* Not enough reports gathered yet. */
		return 0;
	}
	do {
		int ts_0, ts_1;

		ts_p = txpp->ts_p;
		rte_compiler_barrier();
		ts_0 = ts_p - MLX5_TXPP_REARM_SQ_SIZE / 2 - 1;
		if (ts_0 < 0)
			ts_0 += MLX5_TXPP_REARM_SQ_SIZE;
		ts_1 = ts_p - 1;
		if (ts_1 < 0)
			ts_1 += MLX5_TXPP_REARM_SQ_SIZE;
		mlx5_txpp_read_tsa(txpp, &tsa0, ts_0);
		mlx5_txpp_read_tsa(txpp, &tsa1, ts_1);
		rte_compiler_barrier();
	} while (ts_p != txpp->ts_p);
	/* We have two distant reports, calculate the wander. */
	dts = tsa1.ts - tsa0.ts;
	dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
	      (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
	if (dci < 0)
		dci += 1 << MLX5_CQ_INDEX_WIDTH;
	dci *= txpp->tick;
	return (dts > dci) ? dts - dci : dci - dts;
}

/**
 * Routine to retrieve extended device statistics
 * for packet send scheduling. It appends the specific statistics
 * after the parts filled by preceding modules (eth stats, etc.)
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] stats
 *   Pointer to rte extended stats table.
 * @param n
 *   The size of the stats table.
 * @param n_used
 *   Number of stats filled by preceding statistics modules.
 *
 * @return
 *   Number of extended stats on success and stats is filled,
 *   negative on error and rte_errno is set.
 */
int
mlx5_txpp_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *stats,
		     unsigned int n, unsigned int n_used)
{
	unsigned int n_txpp = RTE_DIM(mlx5_txpp_stat_names);

	if (n >= n_used + n_txpp && stats) {
		struct mlx5_priv *priv = dev->data->dev_private;
		struct mlx5_dev_ctx_shared *sh = priv->sh;
		unsigned int i;

		for (i = 0; i < n_txpp; ++i)
			stats[n_used + i].id = n_used + i;
		stats[n_used + 0].value =
			__atomic_load_n(&sh->txpp.err_miss_int,
					__ATOMIC_RELAXED);
		stats[n_used + 1].value =
			__atomic_load_n(&sh->txpp.err_rearm_queue,
					__ATOMIC_RELAXED);
		stats[n_used + 2].value =
			__atomic_load_n(&sh->txpp.err_clock_queue,
					__ATOMIC_RELAXED);
		stats[n_used + 3].value =
			__atomic_load_n(&sh->txpp.err_ts_past,
					__ATOMIC_RELAXED);
		stats[n_used + 4].value =
			__atomic_load_n(&sh->txpp.err_ts_future,
					__ATOMIC_RELAXED);
		stats[n_used + 5].value = mlx5_txpp_xstats_jitter(&sh->txpp);
		stats[n_used + 6].value = mlx5_txpp_xstats_wander(&sh->txpp);
		stats[n_used + 7].value = sh->txpp.sync_lost;
	}
	return n_used + n_txpp;
}
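
/*
 * Usage sketch (application side): the pacing counters appear after
 * the generic ones in the xstats array, e.g.
 *
 *   struct rte_eth_xstat xs[64];
 *   int nb = rte_eth_xstats_get(port_id, xs, RTE_DIM(xs));
 *   ... entries named "tx_pp_*" (see mlx5_txpp_stat_names) report
 *       missed interrupts, queue errors, jitter, wander, sync_lost ...
 */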