1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
5 #include <rte_malloc.h>
6 #include <rte_cycles.h>
7 #include <rte_eal_paging.h>
9 #include <mlx5_malloc.h>
10 #include <mlx5_common_os.h>
11 #include <mlx5_common_devx.h>
14 #include "mlx5_flow.h"
17 * Destroy Completion Queue used for ASO access.
23 mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
26 mlx5_devx_cq_destroy(&cq->cq_obj);
27 memset(cq, 0, sizeof(*cq));
31 * Create Completion Queue used for ASO access.
34 * Context returned from mlx5 open_device() glue function.
36 * Pointer to CQ to create.
37 * @param[in] log_desc_n
38 * Log of number of descriptors in queue.
40 * Socket to use for allocation.
41 * @param[in] uar_page_id
45 * 0 on success, a negative errno value otherwise and rte_errno is set.
48 mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
49 int socket, int uar_page_id)
51 struct mlx5_devx_cq_attr attr = {
52 .uar_page_id = uar_page_id,
55 cq->log_desc_n = log_desc_n;
57 return mlx5_devx_cq_create(ctx, &cq->cq_obj, log_desc_n, &attr, socket);
64 * Pointer to the mlx5 common device.
69 mlx5_aso_dereg_mr(struct mlx5_common_device *cdev, struct mlx5_pmd_mr *mr)
71 void *addr = mr->addr;
73 cdev->mr_scache.dereg_mr_cb(mr);
75 memset(mr, 0, sizeof(*mr));
79 * Register Memory Region.
82 * Pointer to the mlx5 common device.
86 * Pointer to MR to create.
88 * Socket to use for allocation.
91 * 0 on success, a negative errno value otherwise and rte_errno is set.
94 mlx5_aso_reg_mr(struct mlx5_common_device *cdev, size_t length,
95 struct mlx5_pmd_mr *mr, int socket)
100 mr->addr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
103 DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
106 ret = cdev->mr_scache.reg_mr_cb(cdev->pd, mr->addr, length, mr);
108 DRV_LOG(ERR, "Failed to create direct Mkey.");
116 * Destroy Send Queue used for ASO access.
122 mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
124 mlx5_devx_sq_destroy(&sq->sq_obj);
125 mlx5_aso_cq_destroy(&sq->cq);
126 memset(sq, 0, sizeof(*sq));
130 * Initialize Send Queue used for ASO access.
133 * ASO SQ to initialize.
136 mlx5_aso_age_init_sq(struct mlx5_aso_sq *sq)
138 volatile struct mlx5_aso_wqe *restrict wqe;
140 int size = 1 << sq->log_desc_n;
143 /* The state of all the following fields should stay constant. */
144 for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
145 wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
146 (sizeof(*wqe) >> 4));
147 wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.lkey);
148 addr = (uint64_t)((uint64_t *)sq->mr.addr + i *
149 MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
150 wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
151 wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
152 wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
154 (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
155 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
156 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
157 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
158 wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
163 * Initialize Send Queue used for ASO flow meter access.
166 * ASO SQ to initialize.
169 mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq)
171 volatile struct mlx5_aso_wqe *restrict wqe;
173 int size = 1 << sq->log_desc_n;
175 /* The state of all the following fields should stay constant. */
176 for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
177 wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
178 (sizeof(*wqe) >> 4));
179 wqe->aso_cseg.operand_masks = RTE_BE32(0u |
180 (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
181 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
182 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
183 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
184 wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
185 MLX5_COMP_MODE_OFFSET);
190 * Initialize Send Queue used for ASO connection tracking.
193 * ASO SQ to initialize.
196 mlx5_aso_ct_init_sq(struct mlx5_aso_sq *sq)
198 volatile struct mlx5_aso_wqe *restrict wqe;
200 int size = 1 << sq->log_desc_n;
203 /* The state of all the following fields should stay constant. */
204 for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
205 wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
206 (sizeof(*wqe) >> 4));
207 /* One unique MR for the query data. */
208 wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.lkey);
209 /* The magic number 64 is the length of an ASO CT object. */
210 addr = (uint64_t)((uintptr_t)sq->mr.addr + i * 64);
211 wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
212 wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
214 * The values of operand_masks differ between modify and query.
216 * The data_mask may also differ for each modification; in a
217 * query it can be zero and is ignored.
218 * CQE generation is always needed, in order to decide when
219 * the flow can be created or the data read.
221 wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
222 MLX5_COMP_MODE_OFFSET);
227 * Create Send Queue used for ASO access.
230 * Context returned from mlx5 open_device() glue function.
232 * Pointer to SQ to create.
234 * Socket to use for allocation.
236 * User Access Region object.
238 * Protection Domain number to use.
239 * @param[in] log_desc_n
240 * Log of number of descriptors in queue.
241 * @param[in] ts_format
242 * Timestamp format supported by the queue.
245 * 0 on success, a negative errno value otherwise and rte_errno is set.
248 mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket, void *uar,
249 uint32_t pdn, uint16_t log_desc_n, uint32_t ts_format)
251 struct mlx5_devx_create_sq_attr attr = {
252 .user_index = 0xFFFF,
253 .wq_attr = (struct mlx5_devx_wq_attr){
255 .uar_page = mlx5_os_get_devx_uar_page_id(uar),
257 .ts_format = mlx5_ts_format_conv(ts_format),
259 struct mlx5_devx_modify_sq_attr modify_attr = {
260 .state = MLX5_SQC_STATE_RDY,
265 if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
266 mlx5_os_get_devx_uar_page_id(uar)))
268 sq->log_desc_n = log_desc_n;
269 attr.cqn = sq->cq.cq_obj.cq->id;
270 /* Each mlx5_aso_wqe is twice the size of an mlx5_wqe, hence one extra WQEBB per descriptor. */
271 log_wqbb_n = log_desc_n + 1;
272 ret = mlx5_devx_sq_create(ctx, &sq->sq_obj, log_wqbb_n, &attr, socket);
274 DRV_LOG(ERR, "Can't create SQ object.");
278 ret = mlx5_devx_cmd_modify_sq(sq->sq_obj.sq, &modify_attr);
280 DRV_LOG(ERR, "Can't change SQ state to ready.");
287 sq->sqn = sq->sq_obj.sq->id;
288 sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
289 rte_spinlock_init(&sq->sqsl);
292 mlx5_aso_destroy_sq(sq);
297 * API to create and initialize Send Queue used for ASO access.
300 * Pointer to shared device context.
301 * @param[in] aso_opc_mod
302 * Mode of ASO feature.
305 * 0 on success, a negative errno value otherwise and rte_errno is set.
308 mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
309 enum mlx5_access_aso_opc_mod aso_opc_mod)
311 uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;
312 struct mlx5_common_device *cdev = sh->cdev;
314 switch (aso_opc_mod) {
315 case ASO_OPC_MOD_FLOW_HIT:
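/* The MR holds one hit bit per age action: MLX5_ASO_AGE_ACTIONS_PER_POOL / 8 bytes per descriptor. */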
316 if (mlx5_aso_reg_mr(cdev, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
317 sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
319 if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,
320 sh->tx_uar, cdev->pdn,
321 MLX5_ASO_QUEUE_LOG_DESC,
322 cdev->config.hca_attr.sq_ts_format)) {
323 mlx5_aso_dereg_mr(cdev, &sh->aso_age_mng->aso_sq.mr);
326 mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
328 case ASO_OPC_MOD_POLICER:
329 if (mlx5_aso_sq_create(cdev->ctx, &sh->mtrmng->pools_mng.sq, 0,
330 sh->tx_uar, cdev->pdn,
331 MLX5_ASO_QUEUE_LOG_DESC,
332 cdev->config.hca_attr.sq_ts_format))
334 mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
336 case ASO_OPC_MOD_CONNECTION_TRACKING:
337 /* 64B per object for query. */
338 if (mlx5_aso_reg_mr(cdev, 64 * sq_desc_n,
339 &sh->ct_mng->aso_sq.mr, 0))
341 if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,
342 sh->tx_uar, cdev->pdn,
343 MLX5_ASO_QUEUE_LOG_DESC,
344 cdev->config.hca_attr.sq_ts_format)) {
345 mlx5_aso_dereg_mr(cdev, &sh->ct_mng->aso_sq.mr);
348 mlx5_aso_ct_init_sq(&sh->ct_mng->aso_sq);
351 DRV_LOG(ERR, "Unknown ASO operation mode");
358 * API to destroy Send Queue used for ASO access.
361 * Pointer to shared device context.
362 * @param[in] aso_opc_mod
363 * Mode of ASO feature.
366 mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
367 enum mlx5_access_aso_opc_mod aso_opc_mod)
369 struct mlx5_aso_sq *sq;
371 switch (aso_opc_mod) {
372 case ASO_OPC_MOD_FLOW_HIT:
373 mlx5_aso_dereg_mr(sh->cdev, &sh->aso_age_mng->aso_sq.mr);
374 sq = &sh->aso_age_mng->aso_sq;
376 case ASO_OPC_MOD_POLICER:
377 sq = &sh->mtrmng->pools_mng.sq;
379 case ASO_OPC_MOD_CONNECTION_TRACKING:
380 mlx5_aso_dereg_mr(sh->cdev, &sh->ct_mng->aso_sq.mr);
381 sq = &sh->ct_mng->aso_sq;
384 DRV_LOG(ERR, "Unknown ASO operation mode");
387 mlx5_aso_destroy_sq(sq);
391 * Write a burst of WQEs to ASO SQ.
394 * ASO management data, contains the SQ.
396 * Index of the last valid pool.
399 * Number of WQEs in burst.
402 mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
404 volatile struct mlx5_aso_wqe *wqe;
405 struct mlx5_aso_sq *sq = &mng->aso_sq;
406 struct mlx5_aso_age_pool *pool;
407 uint16_t size = 1 << sq->log_desc_n;
408 uint16_t mask = size - 1;
410 uint16_t start_head = sq->head;
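/* Limit the burst to the free SQ slots and to the pools not yet posted. */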
412 max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
415 sq->elts[start_head & mask].burst_size = max;
417 wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
418 rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
420 rte_spinlock_lock(&mng->resize_sl);
421 pool = mng->pools[sq->next];
422 rte_spinlock_unlock(&mng->resize_sl);
423 sq->elts[sq->head & mask].pool = pool;
424 wqe->general_cseg.misc =
425 rte_cpu_to_be_32(((struct mlx5_devx_obj *)
426 (pool->flow_hit_aso_obj))->id);
427 wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
428 MLX5_COMP_MODE_OFFSET);
429 wqe->general_cseg.opcode = rte_cpu_to_be_32
430 (MLX5_OPCODE_ACCESS_ASO |
431 (ASO_OPC_MOD_FLOW_HIT <<
432 WQE_CSEG_OPC_MOD_OFFSET) |
434 WQE_CSEG_WQE_INDEX_OFFSET));
435 sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
440 wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
441 MLX5_COMP_MODE_OFFSET);
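/* Publish the new producer index in the doorbell record, then ring the SQ doorbell through the UAR. */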
443 sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
445 *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH.*/
447 return sq->elts[start_head & mask].burst_size;
451 * Debug utility function. Dump contents of error CQE and WQE.
459 mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
463 DRV_LOG(ERR, "Error cqe:");
464 for (i = 0; i < 16; i += 4)
465 DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
466 cqe[i + 2], cqe[i + 3]);
467 DRV_LOG(ERR, "\nError wqe:");
468 for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
469 DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
470 wqe[i + 2], wqe[i + 3]);
474 * Handle case of error CQE.
480 mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
482 struct mlx5_aso_cq *cq = &sq->cq;
483 uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
484 volatile struct mlx5_err_cqe *cqe =
485 (volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx];
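/* Locate the WQE that triggered the error from the CQE's wqe_counter. */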
488 idx = rte_be_to_cpu_16(cqe->wqe_counter) & ((1u << sq->log_desc_n) - 1);
489 mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
490 (volatile uint32_t *)&sq->sq_obj.aso_wqes[idx]);
494 * Update ASO objects upon completion.
497 * Shared device context.
499 * Number of completed ASO objects.
502 mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
504 struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
505 struct mlx5_aso_sq *sq = &mng->aso_sq;
506 struct mlx5_age_info *age_info;
507 const uint16_t size = 1 << sq->log_desc_n;
508 const uint16_t mask = size - 1;
509 const uint64_t curr = MLX5_CURR_TIME_SEC;
510 uint16_t expected = AGE_CANDIDATE;
513 for (i = 0; i < n; ++i) {
514 uint16_t idx = (sq->tail + i) & mask;
515 struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
516 uint64_t diff = curr - pool->time_of_last_age_check;
517 uint64_t *addr = sq->mr.addr;
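/* Advance to this descriptor's hit bitmap: one bit per age action, stored as 64-bit words. */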
520 addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
521 pool->time_of_last_age_check = curr;
522 for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
523 struct mlx5_aso_age_action *act = &pool->actions[j];
524 struct mlx5_age_param *ap = &act->age_params;
530 if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
535 u8addr = (uint8_t *)addr;
536 hit = (u8addr[byte] >> offset) & 0x1;
538 __atomic_store_n(&ap->sec_since_last_hit, 0,
541 struct mlx5_priv *priv;
543 __atomic_fetch_add(&ap->sec_since_last_hit,
544 diff, __ATOMIC_RELAXED);
545 /* If timeout passed add to aged-out list. */
546 if (ap->sec_since_last_hit <= ap->timeout)
549 rte_eth_devices[ap->port_id].data->dev_private;
550 age_info = GET_PORT_AGE_INFO(priv);
551 rte_spinlock_lock(&age_info->aged_sl);
552 if (__atomic_compare_exchange_n(&ap->state,
558 LIST_INSERT_HEAD(&age_info->aged_aso,
560 MLX5_AGE_SET(age_info,
563 rte_spinlock_unlock(&age_info->aged_sl);
567 mlx5_age_event_prepare(sh);
571 * Handle completions from WQEs sent to ASO SQ.
574 * Shared device context.
577 * Number of CQEs handled.
580 mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
582 struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
583 struct mlx5_aso_sq *sq = &mng->aso_sq;
584 struct mlx5_aso_cq *cq = &sq->cq;
585 volatile struct mlx5_cqe *restrict cqe;
586 const unsigned int cq_size = 1 << cq->log_desc_n;
587 const unsigned int mask = cq_size - 1;
589 uint32_t next_idx = cq->cq_ci & mask;
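/* Number of WQEs posted to the SQ but not yet completed. */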
590 const uint16_t max = (uint16_t)(sq->head - sq->tail);
597 next_idx = (cq->cq_ci + 1) & mask;
598 rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
599 cqe = &cq->cq_obj.cqes[idx];
600 ret = check_cqe(cqe, cq_size, cq->cq_ci);
602 * Be sure the owner read is done before any other cookie or opaque field is read.
606 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
607 if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
609 mlx5_aso_cqe_err_handle(sq);
611 i += sq->elts[(sq->tail + i) & mask].burst_size;
616 mlx5_aso_age_action_update(sh, i);
619 cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
625 * Periodically read CQEs and send WQEs to ASO SQ.
628 * Shared device context containing the ASO SQ.
631 mlx5_flow_aso_alarm(void *arg)
633 struct mlx5_dev_ctx_shared *sh = arg;
634 struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
638 rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
639 n = sh->aso_age_mng->next;
640 rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
641 mlx5_aso_completion_handle(sh);
643 /* End of loop: wait 1 second. */
647 mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
648 if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
649 DRV_LOG(ERR, "Cannot reinitialize aso alarm.");
653 * API to start ASO access using ASO SQ.
656 * Pointer to shared device context.
659 * 0 on success, a negative errno value otherwise and rte_errno is set.
662 mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh)
664 if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
665 DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
672 * API to stop ASO access using ASO SQ.
675 * Pointer to shared device context.
678 * 0 on success, a negative errno value otherwise and rte_errno is set.
681 mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh)
685 if (!sh->aso_age_mng->aso_sq.sq_obj.sq)
689 rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
690 if (rte_errno != EINPROGRESS)
698 mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
699 struct mlx5_aso_mtr *aso_mtr)
701 volatile struct mlx5_aso_wqe *wqe = NULL;
702 struct mlx5_flow_meter_info *fm = NULL;
703 struct mlx5_flow_meter_profile *fmp;
704 uint16_t size = 1 << sq->log_desc_n;
705 uint16_t mask = size - 1;
707 uint32_t dseg_idx = 0;
708 struct mlx5_aso_mtr_pool *pool = NULL;
710 rte_spinlock_lock(&sq->sqsl);
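/* Number of free WQE slots left in the SQ. */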
711 res = size - (uint16_t)(sq->head - sq->tail);
712 if (unlikely(!res)) {
713 DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
714 rte_spinlock_unlock(&sq->sqsl);
717 wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
718 rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
721 sq->elts[sq->head & mask].mtr = aso_mtr;
722 pool = container_of(aso_mtr, struct mlx5_aso_mtr_pool,
723 mtrs[aso_mtr->offset]);
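/* Two meters share one ASO object; offset / 2 is the object index within the pool. */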
724 wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
725 (aso_mtr->offset >> 1));
726 wqe->general_cseg.opcode = rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |
727 (ASO_OPC_MOD_POLICER <<
728 WQE_CSEG_OPC_MOD_OFFSET) |
729 sq->pi << WQE_CSEG_WQE_INDEX_OFFSET);
730 /* There are 2 meters in one ASO cache line. */
731 dseg_idx = aso_mtr->offset & 0x1;
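/* In BYTEWISE_64BYTE mode each mask bit covers one byte; select the 32-byte half that belongs to this meter. */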
732 wqe->aso_cseg.data_mask =
733 RTE_BE64(MLX5_IFC_FLOW_METER_PARAM_MASK << (32 * !dseg_idx));
735 wqe->aso_dseg.mtrs[dseg_idx].cbs_cir =
736 fm->profile->srtcm_prm.cbs_cir;
737 wqe->aso_dseg.mtrs[dseg_idx].ebs_eir =
738 fm->profile->srtcm_prm.ebs_eir;
740 wqe->aso_dseg.mtrs[dseg_idx].cbs_cir =
741 RTE_BE32(MLX5_IFC_FLOW_METER_DISABLE_CBS_CIR_VAL);
742 wqe->aso_dseg.mtrs[dseg_idx].ebs_eir = 0;
745 if (fmp->profile.packet_mode)
746 wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
747 RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
748 (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET) |
749 (MLX5_METER_MODE_PKT << ASO_DSEG_MTR_MODE));
751 wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
752 RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
753 (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
754 switch (fmp->profile.alg) {
755 case RTE_MTR_SRTCM_RFC2697:
756 /* Only needed for RFC2697. */
757 if (fm->profile->srtcm_prm.ebs_eir)
758 wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm |=
759 RTE_BE32(1 << ASO_DSEG_BO_OFFSET);
761 case RTE_MTR_TRTCM_RFC2698:
762 wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm |=
763 RTE_BE32(1 << ASO_DSEG_BBOG_OFFSET);
765 case RTE_MTR_TRTCM_RFC4115:
771 * For software performance reasons, the token fields are not set
772 * when posting the WQE to the ASO SQ. They will be filled by the HW
776 sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
778 sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
780 *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
782 rte_spinlock_unlock(&sq->sqsl);
787 mlx5_aso_mtrs_status_update(struct mlx5_aso_sq *sq, uint16_t aso_mtrs_nums)
789 uint16_t size = 1 << sq->log_desc_n;
790 uint16_t mask = size - 1;
792 struct mlx5_aso_mtr *aso_mtr = NULL;
793 uint8_t exp_state = ASO_METER_WAIT;
795 for (i = 0; i < aso_mtrs_nums; ++i) {
796 aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
797 MLX5_ASSERT(aso_mtr);
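/* Mark the meter ready only if it is still in the WAIT state. */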
798 (void)__atomic_compare_exchange_n(&aso_mtr->state,
799 &exp_state, ASO_METER_READY,
800 false, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
805 mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
807 struct mlx5_aso_cq *cq = &sq->cq;
808 volatile struct mlx5_cqe *restrict cqe;
809 const unsigned int cq_size = 1 << cq->log_desc_n;
810 const unsigned int mask = cq_size - 1;
812 uint32_t next_idx = cq->cq_ci & mask;
817 rte_spinlock_lock(&sq->sqsl);
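/* Number of WQEs posted to the SQ but not yet completed. */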
818 max = (uint16_t)(sq->head - sq->tail);
819 if (unlikely(!max)) {
820 rte_spinlock_unlock(&sq->sqsl);
825 next_idx = (cq->cq_ci + 1) & mask;
826 rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
827 cqe = &cq->cq_obj.cqes[idx];
828 ret = check_cqe(cqe, cq_size, cq->cq_ci);
830 * Be sure the owner read is done before any other cookie or opaque field is read.
834 if (ret != MLX5_CQE_STATUS_SW_OWN) {
835 if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
837 mlx5_aso_cqe_err_handle(sq);
844 mlx5_aso_mtrs_status_update(sq, n);
847 cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
849 rte_spinlock_unlock(&sq->sqsl);
853 * Update meter parameter by sending a WQE.
856 * Pointer to Ethernet device.
858 * Pointer to mlx5 private data structure.
860 * Pointer to flow meter to be modified.
863 * 0 on success, a negative errno value otherwise and rte_errno is set.
866 mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
867 struct mlx5_aso_mtr *mtr)
869 struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
870 uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
873 mlx5_aso_mtr_completion_handle(sq);
874 if (mlx5_aso_mtr_sq_enqueue_single(sq, mtr))
876 /* Wait for a WQE resource. */
877 rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
878 } while (--poll_wqe_times);
879 DRV_LOG(ERR, "Fail to send WQE for ASO meter offset %d",
885 * Wait for meter to be ready.
888 * Pointer to Ethernet device.
890 * Pointer to mlx5 private data structure.
892 * Pointer to flow meter to be modified.
895 * 0 on success, a negative errno value otherwise and rte_errno is set.
898 mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,
899 struct mlx5_aso_mtr *mtr)
901 struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
902 uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
904 if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
908 mlx5_aso_mtr_completion_handle(sq);
909 if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
912 /* Wait for the CQE to be ready. */
913 rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
914 } while (--poll_cqe_times);
915 DRV_LOG(ERR, "Fail to poll CQE ready for ASO meter offset %d",
921 * Post a WQE to the ASO CT SQ to modify the context.
924 * Pointer to the CT pools management structure.
926 * Pointer to the generic CT structure related to the context.
928 * Pointer to configuration profile.
931 * 1 on success (WQE number), 0 on failure.
934 mlx5_aso_ct_sq_enqueue_single(struct mlx5_aso_ct_pools_mng *mng,
935 struct mlx5_aso_ct_action *ct,
936 const struct rte_flow_action_conntrack *profile)
938 volatile struct mlx5_aso_wqe *wqe = NULL;
939 struct mlx5_aso_sq *sq = &mng->aso_sq;
940 uint16_t size = 1 << sq->log_desc_n;
941 uint16_t mask = size - 1;
943 struct mlx5_aso_ct_pool *pool;
948 rte_spinlock_lock(&sq->sqsl);
949 /* Prevent other threads from updating the index. */
950 res = size - (uint16_t)(sq->head - sq->tail);
951 if (unlikely(!res)) {
952 rte_spinlock_unlock(&sq->sqsl);
953 DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
956 wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
957 rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
959 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_WAIT);
960 sq->elts[sq->head & mask].ct = ct;
961 sq->elts[sq->head & mask].query_data = NULL;
962 pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
963 /* Each WQE will have a single CT object. */
964 wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
966 wqe->general_cseg.opcode = rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |
967 (ASO_OPC_MOD_CONNECTION_TRACKING <<
968 WQE_CSEG_OPC_MOD_OFFSET) |
969 sq->pi << WQE_CSEG_WQE_INDEX_OFFSET);
970 wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
972 (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
973 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
974 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
975 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
976 wqe->aso_cseg.data_mask = UINT64_MAX;
977 /* To make compiler happy. */
978 desg = (void *)(uintptr_t)wqe->aso_dseg.data;
979 MLX5_SET(conn_track_aso, desg, valid, 1);
980 MLX5_SET(conn_track_aso, desg, state, profile->state);
981 MLX5_SET(conn_track_aso, desg, freeze_track, !profile->enable);
982 MLX5_SET(conn_track_aso, desg, connection_assured,
983 profile->live_connection);
984 MLX5_SET(conn_track_aso, desg, sack_permitted, profile->selective_ack);
985 MLX5_SET(conn_track_aso, desg, challenged_acked,
986 profile->challenge_ack_passed);
987 /* Heartbeat, retransmission_counter, retranmission_limit_exceeded: 0 */
988 MLX5_SET(conn_track_aso, desg, heartbeat, 0);
989 MLX5_SET(conn_track_aso, desg, max_ack_window,
990 profile->max_ack_window);
991 MLX5_SET(conn_track_aso, desg, retransmission_counter, 0);
992 MLX5_SET(conn_track_aso, desg, retranmission_limit_exceeded, 0);
993 MLX5_SET(conn_track_aso, desg, retranmission_limit,
994 profile->retransmission_limit);
995 MLX5_SET(conn_track_aso, desg, reply_direction_tcp_scale,
996 profile->reply_dir.scale);
997 MLX5_SET(conn_track_aso, desg, reply_direction_tcp_close_initiated,
998 profile->reply_dir.close_initiated);
999 /* Both directions will use the same liberal mode. */
1000 MLX5_SET(conn_track_aso, desg, reply_direction_tcp_liberal_enabled,
1001 profile->liberal_mode);
1002 MLX5_SET(conn_track_aso, desg, reply_direction_tcp_data_unacked,
1003 profile->reply_dir.data_unacked);
1004 MLX5_SET(conn_track_aso, desg, reply_direction_tcp_max_ack,
1005 profile->reply_dir.last_ack_seen);
1006 MLX5_SET(conn_track_aso, desg, original_direction_tcp_scale,
1007 profile->original_dir.scale);
1008 MLX5_SET(conn_track_aso, desg, original_direction_tcp_close_initiated,
1009 profile->original_dir.close_initiated);
1010 MLX5_SET(conn_track_aso, desg, original_direction_tcp_liberal_enabled,
1011 profile->liberal_mode);
1012 MLX5_SET(conn_track_aso, desg, original_direction_tcp_data_unacked,
1013 profile->original_dir.data_unacked);
1014 MLX5_SET(conn_track_aso, desg, original_direction_tcp_max_ack,
1015 profile->original_dir.last_ack_seen);
1016 MLX5_SET(conn_track_aso, desg, last_win, profile->last_window);
1017 MLX5_SET(conn_track_aso, desg, last_dir, profile->last_direction);
1018 MLX5_SET(conn_track_aso, desg, last_index, profile->last_index);
1019 MLX5_SET(conn_track_aso, desg, last_seq, profile->last_seq);
1020 MLX5_SET(conn_track_aso, desg, last_ack, profile->last_ack);
1021 MLX5_SET(conn_track_aso, desg, last_end, profile->last_end);
1022 orig_dir = MLX5_ADDR_OF(conn_track_aso, desg, original_dir);
1023 MLX5_SET(tcp_window_params, orig_dir, sent_end,
1024 profile->original_dir.sent_end);
1025 MLX5_SET(tcp_window_params, orig_dir, reply_end,
1026 profile->original_dir.reply_end);
1027 MLX5_SET(tcp_window_params, orig_dir, max_win,
1028 profile->original_dir.max_win);
1029 MLX5_SET(tcp_window_params, orig_dir, max_ack,
1030 profile->original_dir.max_ack);
1031 reply_dir = MLX5_ADDR_OF(conn_track_aso, desg, reply_dir);
1032 MLX5_SET(tcp_window_params, reply_dir, sent_end,
1033 profile->reply_dir.sent_end);
1034 MLX5_SET(tcp_window_params, reply_dir, reply_end,
1035 profile->reply_dir.reply_end);
1036 MLX5_SET(tcp_window_params, reply_dir, max_win,
1037 profile->reply_dir.max_win);
1038 MLX5_SET(tcp_window_params, reply_dir, max_ack,
1039 profile->reply_dir.max_ack);
1041 sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
1043 sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
1045 *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
1047 rte_spinlock_unlock(&sq->sqsl);
1052 * Update the status field of CTs to indicate they are ready to be used by flows.
1053 * A contiguous batch of CTs since the last update is handled.
1056 * Pointer to ASO CT SQ.
1058 * Number of CT structures to be updated.
1061 * 0 on success, a negative value.
1064 mlx5_aso_ct_status_update(struct mlx5_aso_sq *sq, uint16_t num)
1066 uint16_t size = 1 << sq->log_desc_n;
1067 uint16_t mask = size - 1;
1069 struct mlx5_aso_ct_action *ct = NULL;
1072 for (i = 0; i < num; i++) {
1073 idx = (uint16_t)((sq->tail + i) & mask);
1074 ct = sq->elts[idx].ct;
1076 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_READY);
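/* For a query WQE, copy the 64B CT context from the MR into the caller's buffer. */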
1077 if (sq->elts[idx].query_data)
1078 rte_memcpy(sq->elts[idx].query_data,
1079 (char *)((uintptr_t)sq->mr.addr + idx * 64),
1085 * Post a WQE to the ASO CT SQ to query the current context.
1088 * Pointer to the CT pools management structure.
1090 * Pointer to the generic CT structure related to the context.
1092 * Pointer to data area to be filled.
1095 * 1 on success (WQE number), 0 on failure.
1098 mlx5_aso_ct_sq_query_single(struct mlx5_aso_ct_pools_mng *mng,
1099 struct mlx5_aso_ct_action *ct, char *data)
1101 volatile struct mlx5_aso_wqe *wqe = NULL;
1102 struct mlx5_aso_sq *sq = &mng->aso_sq;
1103 uint16_t size = 1 << sq->log_desc_n;
1104 uint16_t mask = size - 1;
1107 struct mlx5_aso_ct_pool *pool;
1108 enum mlx5_aso_ct_state state =
1109 __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
1111 if (state == ASO_CONNTRACK_FREE) {
1112 DRV_LOG(ERR, "Fail: No context to query");
1114 } else if (state == ASO_CONNTRACK_WAIT) {
1117 rte_spinlock_lock(&sq->sqsl);
1118 res = size - (uint16_t)(sq->head - sq->tail);
1119 if (unlikely(!res)) {
1120 rte_spinlock_unlock(&sq->sqsl);
1121 DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
1124 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_QUERY);
1125 wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
1126 /* Confirm the location and address of the prefetch instruction. */
1127 rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
1128 /* Fill next WQE. */
1129 wqe_idx = sq->head & mask;
1130 sq->elts[wqe_idx].ct = ct;
1131 sq->elts[wqe_idx].query_data = data;
1132 pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
1133 /* Each WQE will have a single CT object. */
1134 wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
1136 wqe->general_cseg.opcode = rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |
1137 (ASO_OPC_MOD_CONNECTION_TRACKING <<
1138 WQE_CSEG_OPC_MOD_OFFSET) |
1139 sq->pi << WQE_CSEG_WQE_INDEX_OFFSET);
1141 * No write request is required.
1142 * ASO_OPER_LOGICAL_AND and ASO_OP_ALWAYS_FALSE are both 0.
1143 * "BYTEWISE_64BYTE" is needed for a whole context.
1144 * Set to 0 directly to reduce an endian swap. (Modify should rewrite.)
1145 * "data_mask" is ignored.
1146 * Buffer address was already filled during initialization.
1148 wqe->aso_cseg.operand_masks = rte_cpu_to_be_32(BYTEWISE_64BYTE <<
1149 ASO_CSEG_DATA_MASK_MODE_OFFSET);
1150 wqe->aso_cseg.data_mask = 0;
1153 * Each WQE contains 2 WQEBB's, even though
1154 * the data segment is not used in this case.
1158 sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
1160 *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
1162 rte_spinlock_unlock(&sq->sqsl);
1167 * Handle completions from WQEs sent to ASO CT.
1170 * Pointer to the CT pools management structure.
1173 mlx5_aso_ct_completion_handle(struct mlx5_aso_ct_pools_mng *mng)
1175 struct mlx5_aso_sq *sq = &mng->aso_sq;
1176 struct mlx5_aso_cq *cq = &sq->cq;
1177 volatile struct mlx5_cqe *restrict cqe;
1178 const uint32_t cq_size = 1 << cq->log_desc_n;
1179 const uint32_t mask = cq_size - 1;
1186 rte_spinlock_lock(&sq->sqsl);
1187 max = (uint16_t)(sq->head - sq->tail);
1188 if (unlikely(!max)) {
1189 rte_spinlock_unlock(&sq->sqsl);
1192 next_idx = cq->cq_ci & mask;
1195 next_idx = (cq->cq_ci + 1) & mask;
1196 /* Need to confirm the position of the prefetch. */
1197 rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
1198 cqe = &cq->cq_obj.cqes[idx];
1199 ret = check_cqe(cqe, cq_size, cq->cq_ci);
1201 * Be sure the owner read is done before any other cookie or opaque field is read.
1205 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1206 if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
1208 mlx5_aso_cqe_err_handle(sq);
1215 mlx5_aso_ct_status_update(sq, n);
1218 cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
1220 rte_spinlock_unlock(&sq->sqsl);
1224 * Update connection tracking ASO context by sending a WQE.
1227 * Pointer to mlx5_dev_ctx_shared object.
1229 * Pointer to connection tracking offload object.
1230 * @param[in] profile
1231 * Pointer to connection tracking TCP parameter.
1234 * 0 on success, -1 on failure.
1237 mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
1238 struct mlx5_aso_ct_action *ct,
1239 const struct rte_flow_action_conntrack *profile)
1241 struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
1242 uint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
1243 struct mlx5_aso_ct_pool *pool;
1247 mlx5_aso_ct_completion_handle(mng);
1248 if (mlx5_aso_ct_sq_enqueue_single(mng, ct, profile))
1250 /* Wait for a WQE resource. */
1251 rte_delay_us_sleep(10u);
1252 } while (--poll_wqe_times);
1253 pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
1254 DRV_LOG(ERR, "Fail to send WQE for ASO CT %d in pool %d",
1255 ct->offset, pool->index);
1260 * This routine waits for WQE completion before continuing with the queried data.
1263 * Pointer to mlx5_dev_ctx_shared object.
1265 * Pointer to connection tracking offload object.
1268 * 0 on success, -1 on failure.
1271 mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh,
1272 struct mlx5_aso_ct_action *ct)
1274 struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
1275 uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
1276 struct mlx5_aso_ct_pool *pool;
1278 if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
1279 ASO_CONNTRACK_READY)
1282 mlx5_aso_ct_completion_handle(mng);
1283 if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
1284 ASO_CONNTRACK_READY)
1286 /* Wait for the CQE to be ready; consider whether to block or sleep. */
1287 rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
1288 } while (--poll_cqe_times);
1289 pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
1290 DRV_LOG(ERR, "Fail to poll CQE for ASO CT %d in pool %d",
1291 ct->offset, pool->index);
1296 * Convert the hardware conntrack data format into the profile.
1298 * @param[in] profile
1299 * Pointer to conntrack profile to be filled after query.
1301 * Pointer to data fetched from hardware.
1304 mlx5_aso_ct_obj_analyze(struct rte_flow_action_conntrack *profile,
1307 void *o_dir = MLX5_ADDR_OF(conn_track_aso, wdata, original_dir);
1308 void *r_dir = MLX5_ADDR_OF(conn_track_aso, wdata, reply_dir);
1310 /* MLX5_GET16 should be taken into consideration. */
1311 profile->state = (enum rte_flow_conntrack_state)
1312 MLX5_GET(conn_track_aso, wdata, state);
1313 profile->enable = !MLX5_GET(conn_track_aso, wdata, freeze_track);
1314 profile->selective_ack = MLX5_GET(conn_track_aso, wdata,
1316 profile->live_connection = MLX5_GET(conn_track_aso, wdata,
1317 connection_assured);
1318 profile->challenge_ack_passed = MLX5_GET(conn_track_aso, wdata,
1320 profile->max_ack_window = MLX5_GET(conn_track_aso, wdata,
1322 profile->retransmission_limit = MLX5_GET(conn_track_aso, wdata,
1323 retranmission_limit);
1324 profile->last_window = MLX5_GET(conn_track_aso, wdata, last_win);
1325 profile->last_direction = MLX5_GET(conn_track_aso, wdata, last_dir);
1326 profile->last_index = (enum rte_flow_conntrack_tcp_last_index)
1327 MLX5_GET(conn_track_aso, wdata, last_index);
1328 profile->last_seq = MLX5_GET(conn_track_aso, wdata, last_seq);
1329 profile->last_ack = MLX5_GET(conn_track_aso, wdata, last_ack);
1330 profile->last_end = MLX5_GET(conn_track_aso, wdata, last_end);
1331 profile->liberal_mode = MLX5_GET(conn_track_aso, wdata,
1332 reply_direction_tcp_liberal_enabled) |
1333 MLX5_GET(conn_track_aso, wdata,
1334 original_direction_tcp_liberal_enabled);
1335 /* The RTE profile structure has no per-direction liberal mode field. */
1336 profile->reply_dir.scale = MLX5_GET(conn_track_aso, wdata,
1337 reply_direction_tcp_scale);
1338 profile->reply_dir.close_initiated = MLX5_GET(conn_track_aso, wdata,
1339 reply_direction_tcp_close_initiated);
1340 profile->reply_dir.data_unacked = MLX5_GET(conn_track_aso, wdata,
1341 reply_direction_tcp_data_unacked);
1342 profile->reply_dir.last_ack_seen = MLX5_GET(conn_track_aso, wdata,
1343 reply_direction_tcp_max_ack);
1344 profile->reply_dir.sent_end = MLX5_GET(tcp_window_params,
1346 profile->reply_dir.reply_end = MLX5_GET(tcp_window_params,
1348 profile->reply_dir.max_win = MLX5_GET(tcp_window_params,
1350 profile->reply_dir.max_ack = MLX5_GET(tcp_window_params,
1352 profile->original_dir.scale = MLX5_GET(conn_track_aso, wdata,
1353 original_direction_tcp_scale);
1354 profile->original_dir.close_initiated = MLX5_GET(conn_track_aso, wdata,
1355 original_direction_tcp_close_initiated);
1356 profile->original_dir.data_unacked = MLX5_GET(conn_track_aso, wdata,
1357 original_direction_tcp_data_unacked);
1358 profile->original_dir.last_ack_seen = MLX5_GET(conn_track_aso, wdata,
1359 original_direction_tcp_max_ack);
1360 profile->original_dir.sent_end = MLX5_GET(tcp_window_params,
1362 profile->original_dir.reply_end = MLX5_GET(tcp_window_params,
1364 profile->original_dir.max_win = MLX5_GET(tcp_window_params,
1366 profile->original_dir.max_ack = MLX5_GET(tcp_window_params,
1371 * Query connection tracking information by sending a WQE.
1374 * Pointer to Ethernet device.
1376 * Pointer to connection tracking offload object.
1377 * @param[out] profile
1378 * Pointer to connection tracking TCP information.
1381 * 0 on success, -1 on failure.
1384 mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,
1385 struct mlx5_aso_ct_action *ct,
1386 struct rte_flow_action_conntrack *profile)
1388 struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
1389 uint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
1390 struct mlx5_aso_ct_pool *pool;
1391 char out_data[64 * 2];
1396 mlx5_aso_ct_completion_handle(mng);
1397 ret = mlx5_aso_ct_sq_query_single(mng, ct, out_data);
1402 /* Wait for a WQE resource or a state change. */
1404 rte_delay_us_sleep(10u);
1405 } while (--poll_wqe_times);
1406 pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
1407 DRV_LOG(ERR, "Fail to send WQE for ASO CT %d in pool %d",
1408 ct->offset, pool->index);
1411 ret = mlx5_aso_ct_wait_ready(sh, ct);
1413 mlx5_aso_ct_obj_analyze(profile, out_data);
1418 * Make sure the conntrack context is synchronized with hardware before
1419 * creating a flow rule that uses it.
1422 * Pointer to shared device context.
1424 * Pointer to connection tracking offload object.
1427 * 0 on success, a negative errno value otherwise and rte_errno is set.
1430 mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,
1431 struct mlx5_aso_ct_action *ct)
1433 struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
1434 uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
1435 enum mlx5_aso_ct_state state =
1436 __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
1438 if (state == ASO_CONNTRACK_FREE) {
1441 } else if (state == ASO_CONNTRACK_READY ||
1442 state == ASO_CONNTRACK_QUERY) {
1446 mlx5_aso_ct_completion_handle(mng);
1447 state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
1448 if (state == ASO_CONNTRACK_READY ||
1449 state == ASO_CONNTRACK_QUERY)
1451 /* Wait for the CQE to be ready; consider whether to block or sleep. */
1452 rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
1453 } while (--poll_cqe_times);