/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_eal_paging.h>

#include <mlx5_malloc.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5.h"
#include "mlx5_flow.h"

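/*
 * This file implements the ASO (Advanced Steering Operation) queues shared
 * by three flow engine features: flow hit (aging) tracking, metering
 * (policer) and connection tracking. Each feature owns a dedicated DevX
 * send queue: WQEs posted to the SQ ask the HW to read or modify ASO
 * objects, and completions are collected from the companion CQ.
 */
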
/**
 * Free MR resources.
 *
 * @param[in] cdev
 *   Pointer to the mlx5 common device.
 * @param[in] mr
 *   MR to free.
 */
static void
mlx5_aso_dereg_mr(struct mlx5_common_device *cdev, struct mlx5_pmd_mr *mr)
{
	void *addr = mr->addr;

	cdev->mr_scache.dereg_mr_cb(mr);
	mlx5_free(addr);
	memset(mr, 0, sizeof(*mr));
}

/**
 * Register Memory Region.
 *
 * @param[in] cdev
 *   Pointer to the mlx5 common device.
 * @param[in] length
 *   Size of MR buffer.
 * @param[in/out] mr
 *   Pointer to MR to create.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_reg_mr(struct mlx5_common_device *cdev, size_t length,
		struct mlx5_pmd_mr *mr)
{
	int ret;

	mr->addr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
			       SOCKET_ID_ANY);
	if (!mr->addr) {
		DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
		return -1;
	}
	ret = cdev->mr_scache.reg_mr_cb(cdev->pd, mr->addr, length, mr);
	if (ret) {
		DRV_LOG(ERR, "Failed to create direct Mkey.");
		mlx5_free(mr->addr);
	}
	return ret;
}

/**
 * Destroy Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to destroy.
 */
static void
mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
{
	mlx5_devx_sq_destroy(&sq->sq_obj);
	mlx5_devx_cq_destroy(&sq->cq.cq_obj);
	memset(sq, 0, sizeof(*sq));
}

/**
 * Initialize Send Queue used for ASO flow hit (age) access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_age_init_sq(struct mlx5_aso_sq *sq)
{
	volatile struct mlx5_aso_wqe *restrict wqe;
	int i;
	int size = 1 << sq->log_desc_n;
	uint64_t addr;

	/* All the next fields state should stay constant. */
	for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
							   (sizeof(*wqe) >> 4));
		wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.lkey);
		addr = (uint64_t)((uint64_t *)sq->mr.addr + i *
				  MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
		wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
		wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
		wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
			(0u |
			 (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
			 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
		wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
	}
}

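/*
 * Layout note for the loop above: the MR holds one hit-bit block per WQE
 * slot, MLX5_ASO_AGE_ACTIONS_PER_POOL bits (one bit per age action). The
 * pointer arithmetic is done on a uint64_t base, so slot i starts at byte
 * offset i * MLX5_ASO_AGE_ACTIONS_PER_POOL / 8; e.g. assuming the usual
 * MLX5_ASO_AGE_ACTIONS_PER_POOL of 512, each slot covers 64 bytes (eight
 * uint64_t words).
 */
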
/**
 * Initialize Send Queue used for ASO flow meter access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq)
{
	volatile struct mlx5_aso_wqe *restrict wqe;
	int i;
	int size = 1 << sq->log_desc_n;

	/* All the next fields state should stay constant. */
	for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
							   (sizeof(*wqe) >> 4));
		wqe->aso_cseg.operand_masks = RTE_BE32(0u |
			(ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
			(ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
			(ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
			(BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
						   MLX5_COMP_MODE_OFFSET);
	}
}

/**
 * Initialize Send Queue used for ASO connection tracking.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_ct_init_sq(struct mlx5_aso_sq *sq)
{
	volatile struct mlx5_aso_wqe *restrict wqe;
	int i;
	int size = 1 << sq->log_desc_n;
	uint64_t addr;

	/* All the next fields state should stay constant. */
	for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
							   (sizeof(*wqe) >> 4));
		/* One unique MR for the query data. */
		wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.lkey);
		/* Magic number 64 represents the length of an ASO CT object. */
		addr = (uint64_t)((uintptr_t)sq->mr.addr + i * 64);
		wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
		wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
		/*
		 * The values of operand_masks are different for modify
		 * and query.
		 * And data_mask may be different for each modification. In
		 * query, it could be zero and ignored.
		 * CQE generation is always needed, in order to decide when
		 * it is available to create the flow or read the data.
		 */
		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
						   MLX5_COMP_MODE_OFFSET);
	}
}

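/*
 * Note how the three init routines above differ: the age SQ points every
 * WQE at a 64B slice of the MR where the HW dumps the hit bits; the meter
 * SQ carries its parameters inline in the WQE data segment, so no lkey/VA
 * is programmed; the CT SQ programs lkey/VA like the age SQ (for query
 * read-back) but leaves operand_masks and data_mask to be set per
 * operation at enqueue time.
 */
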
/**
 * Create Send Queue used for ASO access.
 *
 * @param[in] cdev
 *   Pointer to the mlx5 common device.
 * @param[in/out] sq
 *   Pointer to SQ to create.
 * @param[in] uar
 *   User Access Region object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_sq_create(struct mlx5_common_device *cdev, struct mlx5_aso_sq *sq,
		   void *uar)
{
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(uar),
	};
	struct mlx5_devx_create_sq_attr sq_attr = {
		.user_index = 0xFFFF,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = cdev->pdn,
			.uar_page = mlx5_os_get_devx_uar_page_id(uar),
		},
		.ts_format =
			mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
	};
	struct mlx5_devx_modify_sq_attr modify_attr = {
		.state = MLX5_SQC_STATE_RDY,
	};
	uint16_t log_wqbb_n;
	int ret;

	if (mlx5_devx_cq_create(cdev->ctx, &sq->cq.cq_obj,
				MLX5_ASO_QUEUE_LOG_DESC, &cq_attr,
				SOCKET_ID_ANY))
		goto error;
	sq->cq.cq_ci = 0;
	sq->cq.log_desc_n = MLX5_ASO_QUEUE_LOG_DESC;
	sq->log_desc_n = MLX5_ASO_QUEUE_LOG_DESC;
	sq_attr.cqn = sq->cq.cq_obj.cq->id;
	/* The mlx5_aso_wqe is twice the size of an mlx5_wqe (2 WQEBBs). */
	log_wqbb_n = sq->log_desc_n + 1;
	ret = mlx5_devx_sq_create(cdev->ctx, &sq->sq_obj, log_wqbb_n, &sq_attr,
				  SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create SQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	ret = mlx5_devx_cmd_modify_sq(sq->sq_obj.sq, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change SQ state to ready.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->pi = 0;
	sq->head = 0;
	sq->tail = 0;
	sq->sqn = sq->sq_obj.sq->id;
	rte_spinlock_init(&sq->sqsl);
	return 0;
error:
	mlx5_aso_destroy_sq(sq);
	return -1;
}

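/*
 * Sizing sketch for the function above: one mlx5_aso_wqe occupies 2 WQEBBs
 * (2 * 64B), hence log_wqbb_n = log_desc_n + 1. E.g. assuming a
 * MLX5_ASO_QUEUE_LOG_DESC of 4, the SQ would hold 16 ASO WQEs in 32 WQEBBs
 * and the CQ would need only 16 entries, one CQE per WQE.
 */
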
/**
 * API to create and initialize Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] aso_opc_mod
 *   Mode of ASO feature.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
		    enum mlx5_access_aso_opc_mod aso_opc_mod)
{
	uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;
	struct mlx5_common_device *cdev = sh->cdev;

	switch (aso_opc_mod) {
	case ASO_OPC_MOD_FLOW_HIT:
		if (mlx5_aso_reg_mr(cdev, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
				    sq_desc_n, &sh->aso_age_mng->aso_sq.mr))
			return -1;
		if (mlx5_aso_sq_create(cdev, &sh->aso_age_mng->aso_sq,
				       sh->tx_uar.obj)) {
			mlx5_aso_dereg_mr(cdev, &sh->aso_age_mng->aso_sq.mr);
			return -1;
		}
		mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
		break;
	case ASO_OPC_MOD_POLICER:
		if (mlx5_aso_sq_create(cdev, &sh->mtrmng->pools_mng.sq,
				       sh->tx_uar.obj))
			return -1;
		mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
		break;
	case ASO_OPC_MOD_CONNECTION_TRACKING:
		/* 64B per object for query. */
		if (mlx5_aso_reg_mr(cdev, 64 * sq_desc_n,
				    &sh->ct_mng->aso_sq.mr))
			return -1;
		if (mlx5_aso_sq_create(cdev, &sh->ct_mng->aso_sq,
				       sh->tx_uar.obj)) {
			mlx5_aso_dereg_mr(cdev, &sh->ct_mng->aso_sq.mr);
			return -1;
		}
		mlx5_aso_ct_init_sq(&sh->ct_mng->aso_sq);
		break;
	default:
		DRV_LOG(ERR, "Unknown ASO operation mode");
		return -1;
	}
	return 0;
}

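/*
 * A minimal usage sketch (hypothetical helper, not part of the driver API):
 * bring up the flow-hit ASO queue and start the polling alarm, tearing the
 * queue down again if polling cannot be started.
 */
static int __rte_unused
mlx5_aso_age_start_sketch(struct mlx5_dev_ctx_shared *sh)
{
	if (mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT))
		return -rte_errno;
	if (mlx5_aso_flow_hit_queue_poll_start(sh)) {
		mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT);
		return -rte_errno;
	}
	return 0;
}
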
/**
 * API to destroy Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] aso_opc_mod
 *   Mode of ASO feature.
 */
void
mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
		      enum mlx5_access_aso_opc_mod aso_opc_mod)
{
	struct mlx5_aso_sq *sq;

	switch (aso_opc_mod) {
	case ASO_OPC_MOD_FLOW_HIT:
		mlx5_aso_dereg_mr(sh->cdev, &sh->aso_age_mng->aso_sq.mr);
		sq = &sh->aso_age_mng->aso_sq;
		break;
	case ASO_OPC_MOD_POLICER:
		sq = &sh->mtrmng->pools_mng.sq;
		break;
	case ASO_OPC_MOD_CONNECTION_TRACKING:
		mlx5_aso_dereg_mr(sh->cdev, &sh->ct_mng->aso_sq.mr);
		sq = &sh->ct_mng->aso_sq;
		break;
	default:
		DRV_LOG(ERR, "Unknown ASO operation mode");
		return;
	}
	mlx5_aso_destroy_sq(sq);
}

/**
 * Write a burst of WQEs to ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] n
 *   Index of the last valid pool.
 *
 * @return
 *   Number of WQEs in burst.
 */
static uint16_t
mlx5_aso_sq_enqueue_burst(struct mlx5_dev_ctx_shared *sh, uint16_t n)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	volatile struct mlx5_aso_wqe *wqe;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_age_pool *pool;
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t max;
	uint16_t start_head = sq->head;

	max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
	if (unlikely(!max))
		return 0;
	sq->elts[start_head & mask].burst_size = max;
	do {
		wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
		rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
		/* Fill next WQE. */
		rte_rwlock_read_lock(&mng->resize_rwl);
		pool = mng->pools[sq->next];
		rte_rwlock_read_unlock(&mng->resize_rwl);
		sq->elts[sq->head & mask].pool = pool;
		wqe->general_cseg.misc =
			rte_cpu_to_be_32(((struct mlx5_devx_obj *)
					 (pool->flow_hit_aso_obj))->id);
		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
						   MLX5_COMP_MODE_OFFSET);
		wqe->general_cseg.opcode = rte_cpu_to_be_32
						(MLX5_OPCODE_ACCESS_ASO |
						 (ASO_OPC_MOD_FLOW_HIT <<
						  WQE_CSEG_OPC_MOD_OFFSET) |
						 (sq->pi <<
						  WQE_CSEG_WQE_INDEX_OFFSET));
		sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
		sq->head++;
		sq->next++;
		max--;
	} while (max);
	wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					   MLX5_COMP_MODE_OFFSET);
	mlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,
			   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],
			   !sh->tx_uar.dbnc);
	return sq->elts[start_head & mask].burst_size;
}

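/*
 * Counter note for the enqueue above: sq->head and sq->tail count whole
 * ASO WQEs while sq->pi counts WQEBBs, advancing by 2 per WQE; pi is what
 * goes into the WQE index field and the doorbell. E.g. after three WQEs:
 * head == 3, pi == 6.
 */
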
/**
 * Debug utility function. Dump contents of error CQE and WQE.
 *
 * @param[in] cqe
 *   Error CQE to dump.
 * @param[in] wqe
 *   Error WQE to dump.
 */
static void
mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
{
	int i;

	DRV_LOG(ERR, "Error cqe:");
	for (i = 0; i < 16; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
			cqe[i + 2], cqe[i + 3]);
	DRV_LOG(ERR, "\nError wqe:");
	for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
			wqe[i + 2], wqe[i + 3]);
}

/**
 * Handle case of error CQE.
 *
 * @param[in] sq
 *   ASO SQ to use.
 */
static void
mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
{
	struct mlx5_aso_cq *cq = &sq->cq;
	uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
	volatile struct mlx5_err_cqe *cqe =
			(volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx];

	cq->errors++;
	idx = rte_be_to_cpu_16(cqe->wqe_counter) &
	      ((1u << sq->log_desc_n) - 1);
	mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
			       (volatile uint32_t *)&sq->sq_obj.aso_wqes[idx]);
}

/**
 * Update ASO objects upon completion.
 *
 * @param[in] sh
 *   Shared device context.
 * @param[in] n
 *   Number of completed ASO objects.
 */
static void
mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_age_info *age_info;
	const uint16_t size = 1 << sq->log_desc_n;
	const uint16_t mask = size - 1;
	const uint64_t curr = MLX5_CURR_TIME_SEC;
	uint16_t expected = AGE_CANDIDATE;
	uint16_t i;

	for (i = 0; i < n; ++i) {
		uint16_t idx = (sq->tail + i) & mask;
		struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
		uint64_t diff = curr - pool->time_of_last_age_check;
		uint64_t *addr = sq->mr.addr;
		int j;

		addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
		pool->time_of_last_age_check = curr;
		for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
			struct mlx5_aso_age_action *act = &pool->actions[j];
			struct mlx5_age_param *ap = &act->age_params;
			uint8_t byte;
			uint8_t offset;
			uint8_t *u8addr;
			uint8_t hit;

			if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
					    AGE_CANDIDATE)
				continue;
			byte = 63 - (j / 8);
			offset = j % 8;
			u8addr = (uint8_t *)addr;
			hit = (u8addr[byte] >> offset) & 0x1;
			if (hit) {
				__atomic_store_n(&ap->sec_since_last_hit, 0,
						 __ATOMIC_RELAXED);
			} else {
				struct mlx5_priv *priv;

				__atomic_fetch_add(&ap->sec_since_last_hit,
						   diff, __ATOMIC_RELAXED);
				/* If timeout passed add to aged-out list. */
				if (ap->sec_since_last_hit <= ap->timeout)
					continue;
				priv =
				rte_eth_devices[ap->port_id].data->dev_private;
				age_info = GET_PORT_AGE_INFO(priv);
				rte_spinlock_lock(&age_info->aged_sl);
				if (__atomic_compare_exchange_n(&ap->state,
								&expected,
								AGE_TMOUT,
								false,
							       __ATOMIC_RELAXED,
							    __ATOMIC_RELAXED)) {
					LIST_INSERT_HEAD(&age_info->aged_aso,
							 act, next);
					MLX5_AGE_SET(age_info,
						     MLX5_AGE_EVENT_NEW);
				}
				rte_spinlock_unlock(&age_info->aged_sl);
			}
		}
	}
	mlx5_age_event_prepare(sh);
}

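/*
 * Bit extraction above: the hit bit of action j lives in the 64B block at
 * u8addr, byte index 63 - j / 8 (the HW dumps the bitfield in big-endian
 * byte order), bit offset j % 8 within that byte; e.g. action 0 is bit 0
 * of byte 63 and action 8 is bit 0 of byte 62.
 */
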
/**
 * Handle completions from WQEs sent to ASO SQ.
 *
 * @param[in] sh
 *   Shared device context.
 *
 * @return
 *   Number of CQEs handled.
 */
static uint16_t
mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = cq->cq_ci & mask;
	const uint16_t max = (uint16_t)(sq->head - sq->tail);
	uint16_t i = 0;
	int ret;

	if (unlikely(!max))
		return 0;
	do {
		idx = next_idx;
		next_idx = (cq->cq_ci + 1) & mask;
		rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
		cqe = &cq->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_aso_cqe_err_handle(sq);
		} else {
			i += sq->elts[(sq->tail + i) & mask].burst_size;
		}
		cq->cq_ci++;
	} while (1);
	if (likely(i)) {
		mlx5_aso_age_action_update(sh, i);
		sq->tail += i;
		rte_io_wmb();
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	}
	return i;
}

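/*
 * Unlike the meter and CT completion handlers below, which count one
 * completed object per CQE, this handler advances by the burst size
 * recorded at enqueue time: only the last WQE of a burst requests
 * MLX5_COMP_ALWAYS, so a single CQE acknowledges the whole burst.
 */
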
/**
 * Periodically read CQEs and send WQEs to ASO SQ.
 *
 * @param[in] arg
 *   Shared device context containing the ASO SQ.
 */
static void
mlx5_flow_aso_alarm(void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;
	struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
	uint32_t us = 100u;
	uint16_t n;

	rte_rwlock_read_lock(&sh->aso_age_mng->resize_rwl);
	n = sh->aso_age_mng->next;
	rte_rwlock_read_unlock(&sh->aso_age_mng->resize_rwl);
	mlx5_aso_completion_handle(sh);
	if (sq->next == n) {
		/* End of loop: wait 1 second. */
		us = US_PER_S;
		sq->next = 0;
	}
	mlx5_aso_sq_enqueue_burst(sh, n);
	if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
		DRV_LOG(ERR, "Cannot reinitialize aso alarm.");
}

/**
 * API to start ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh)
{
	if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
		DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
		return -rte_errno;
	}
	return 0;
}

/**
 * API to stop ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh)
{
	int retries = 1024;

	if (!sh->aso_age_mng->aso_sq.sq_obj.sq)
		return -EINVAL;
	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}
	return -rte_errno;
}

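/*
 * rte_eal_alarm_cancel() sets rte_errno to EINPROGRESS when the alarm
 * callback is currently executing on another lcore, so the loop above
 * keeps retrying (bounded by the retries counter) until the in-flight
 * mlx5_flow_aso_alarm() invocation has finished.
 */
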
static uint16_t
mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,
			       struct mlx5_aso_sq *sq,
			       struct mlx5_aso_mtr *aso_mtr)
{
	volatile struct mlx5_aso_wqe *wqe = NULL;
	struct mlx5_flow_meter_info *fm = NULL;
	struct mlx5_flow_meter_profile *fmp;
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t res;
	uint32_t dseg_idx = 0;
	struct mlx5_aso_mtr_pool *pool = NULL;

	rte_spinlock_lock(&sq->sqsl);
	res = size - (uint16_t)(sq->head - sq->tail);
	if (unlikely(!res)) {
		DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
		rte_spinlock_unlock(&sq->sqsl);
		return 0;
	}
	wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
	rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
	/* Fill next WQE. */
	fm = &aso_mtr->fm;
	sq->elts[sq->head & mask].mtr = aso_mtr;
	pool = container_of(aso_mtr, struct mlx5_aso_mtr_pool,
			    mtrs[aso_mtr->offset]);
	wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
						  (aso_mtr->offset >> 1));
	wqe->general_cseg.opcode = rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |
						    (ASO_OPC_MOD_POLICER <<
						    WQE_CSEG_OPC_MOD_OFFSET) |
						    sq->pi <<
						    WQE_CSEG_WQE_INDEX_OFFSET);
	/* There are 2 meters in one ASO cache line. */
	dseg_idx = aso_mtr->offset & 0x1;
	wqe->aso_cseg.data_mask =
		RTE_BE64(MLX5_IFC_FLOW_METER_PARAM_MASK << (32 * !dseg_idx));
	if (fm->is_enable) {
		wqe->aso_dseg.mtrs[dseg_idx].cbs_cir =
			fm->profile->srtcm_prm.cbs_cir;
		wqe->aso_dseg.mtrs[dseg_idx].ebs_eir =
			fm->profile->srtcm_prm.ebs_eir;
	} else {
		wqe->aso_dseg.mtrs[dseg_idx].cbs_cir =
			RTE_BE32(MLX5_IFC_FLOW_METER_DISABLE_CBS_CIR_VAL);
		wqe->aso_dseg.mtrs[dseg_idx].ebs_eir = 0;
	}
	fmp = fm->profile;
	if (fmp->profile.packet_mode)
		wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
			RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
			(MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET) |
			(MLX5_METER_MODE_PKT << ASO_DSEG_MTR_MODE));
	else
		wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
			RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
			(MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
	switch (fmp->profile.alg) {
	case RTE_MTR_SRTCM_RFC2697:
		/* Only needed for RFC2697. */
		if (fm->profile->srtcm_prm.ebs_eir)
			wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm |=
				RTE_BE32(1 << ASO_DSEG_BO_OFFSET);
		break;
	case RTE_MTR_TRTCM_RFC2698:
		wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm |=
				RTE_BE32(1 << ASO_DSEG_BBOG_OFFSET);
		break;
	case RTE_MTR_TRTCM_RFC4115:
	default:
		break;
	}
	/*
	 * Note:
	 * Due to software performance reasons, the token fields will not be
	 * set when posting the WQE to the ASO SQ. They will be filled by the
	 * HW automatically.
	 */
	sq->head++;
	sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
	mlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,
			   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],
			   !sh->tx_uar.dbnc);
	rte_spinlock_unlock(&sq->sqsl);
	return 1;
}

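/*
 * Addressing sketch for the enqueue above: one 64B ASO object holds two
 * meters, so devx_obj->id + (offset >> 1) selects the object while
 * offset & 0x1 selects the half. The data_mask shift keeps only the
 * parameter window of the addressed half: dseg_idx == 0 shifts the mask
 * left by 32 bits, dseg_idx == 1 leaves it in place.
 */
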
static void
mlx5_aso_mtrs_status_update(struct mlx5_aso_sq *sq, uint16_t aso_mtrs_nums)
{
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t i;
	struct mlx5_aso_mtr *aso_mtr = NULL;
	uint8_t exp_state = ASO_METER_WAIT;

	for (i = 0; i < aso_mtrs_nums; ++i) {
		aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
		MLX5_ASSERT(aso_mtr);
		(void)__atomic_compare_exchange_n(&aso_mtr->state,
				&exp_state, ASO_METER_READY,
				false, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	}
}

static void
mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
{
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = cq->cq_ci & mask;
	uint16_t max;
	uint16_t n = 0;
	int ret;

	rte_spinlock_lock(&sq->sqsl);
	max = (uint16_t)(sq->head - sq->tail);
	if (unlikely(!max)) {
		rte_spinlock_unlock(&sq->sqsl);
		return;
	}
	do {
		idx = next_idx;
		next_idx = (cq->cq_ci + 1) & mask;
		rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
		cqe = &cq->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (ret != MLX5_CQE_STATUS_SW_OWN) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_aso_cqe_err_handle(sq);
		} else {
			n++;
		}
		cq->cq_ci++;
	} while (1);
	if (likely(n)) {
		mlx5_aso_mtrs_status_update(sq, n);
		sq->tail += n;
		rte_io_wmb();
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	}
	rte_spinlock_unlock(&sq->sqsl);
}

/**
 * Update meter parameter by send WQE.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] mtr
 *   Pointer to ASO meter to be modified.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
			     struct mlx5_aso_mtr *mtr)
{
	struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
	uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;

	do {
		mlx5_aso_mtr_completion_handle(sq);
		if (mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr))
			return 0;
		/* Waiting for wqe resource. */
		rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
	} while (--poll_wqe_times);
	DRV_LOG(ERR, "Fail to send WQE for ASO meter offset %d",
		mtr->offset);
	return -1;
}

/**
 * Wait for meter to be ready.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] mtr
 *   Pointer to ASO meter to wait for.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,
		  struct mlx5_aso_mtr *mtr)
{
	struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
	uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;

	if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
	    ASO_METER_READY)
		return 0;
	do {
		mlx5_aso_mtr_completion_handle(sq);
		if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
		    ASO_METER_READY)
			return 0;
		/* Waiting for CQE ready. */
		rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
	} while (--poll_cqe_times);
	DRV_LOG(ERR, "Fail to poll CQE ready for ASO meter offset %d",
		mtr->offset);
	return -1;
}

/**
 * Post a WQE to the ASO CT SQ to modify the context.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] ct
 *   Pointer to the generic CT structure related to the context.
 * @param[in] profile
 *   Pointer to configuration profile.
 *
 * @return
 *   1 on success (WQE number), 0 on failure.
 */
static uint16_t
mlx5_aso_ct_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,
			      struct mlx5_aso_ct_action *ct,
			      const struct rte_flow_action_conntrack *profile)
{
	volatile struct mlx5_aso_wqe *wqe = NULL;
	struct mlx5_aso_sq *sq = &sh->ct_mng->aso_sq;
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t res;
	struct mlx5_aso_ct_pool *pool;
	void *desg;
	void *orig_dir;
	void *reply_dir;

	rte_spinlock_lock(&sq->sqsl);
	/* Prevent other threads from updating the index. */
	res = size - (uint16_t)(sq->head - sq->tail);
	if (unlikely(!res)) {
		rte_spinlock_unlock(&sq->sqsl);
		DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
		return 0;
	}
	wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
	rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
	/* Fill next WQE. */
	MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_WAIT);
	sq->elts[sq->head & mask].ct = ct;
	sq->elts[sq->head & mask].query_data = NULL;
	pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
	/* Each WQE will have a single CT object. */
	wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
						  ct->offset);
	wqe->general_cseg.opcode = rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |
			(ASO_OPC_MOD_CONNECTION_TRACKING <<
			 WQE_CSEG_OPC_MOD_OFFSET) |
			sq->pi << WQE_CSEG_WQE_INDEX_OFFSET);
	wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
			(0u |
			 (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
			 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
	wqe->aso_cseg.data_mask = UINT64_MAX;
	/* To make compiler happy. */
	desg = (void *)(uintptr_t)wqe->aso_dseg.data;
	MLX5_SET(conn_track_aso, desg, valid, 1);
	MLX5_SET(conn_track_aso, desg, state, profile->state);
	MLX5_SET(conn_track_aso, desg, freeze_track, !profile->enable);
	MLX5_SET(conn_track_aso, desg, connection_assured,
		 profile->live_connection);
	MLX5_SET(conn_track_aso, desg, sack_permitted, profile->selective_ack);
	MLX5_SET(conn_track_aso, desg, challenged_acked,
		 profile->challenge_ack_passed);
	/* Heartbeat, retransmission_counter, retranmission_limit_exceeded: 0 */
	MLX5_SET(conn_track_aso, desg, heartbeat, 0);
	MLX5_SET(conn_track_aso, desg, max_ack_window,
		 profile->max_ack_window);
	MLX5_SET(conn_track_aso, desg, retransmission_counter, 0);
	MLX5_SET(conn_track_aso, desg, retranmission_limit_exceeded, 0);
	MLX5_SET(conn_track_aso, desg, retranmission_limit,
		 profile->retransmission_limit);
	MLX5_SET(conn_track_aso, desg, reply_direction_tcp_scale,
		 profile->reply_dir.scale);
	MLX5_SET(conn_track_aso, desg, reply_direction_tcp_close_initiated,
		 profile->reply_dir.close_initiated);
	/* Both directions will use the same liberal mode. */
	MLX5_SET(conn_track_aso, desg, reply_direction_tcp_liberal_enabled,
		 profile->liberal_mode);
	MLX5_SET(conn_track_aso, desg, reply_direction_tcp_data_unacked,
		 profile->reply_dir.data_unacked);
	MLX5_SET(conn_track_aso, desg, reply_direction_tcp_max_ack,
		 profile->reply_dir.last_ack_seen);
	MLX5_SET(conn_track_aso, desg, original_direction_tcp_scale,
		 profile->original_dir.scale);
	MLX5_SET(conn_track_aso, desg, original_direction_tcp_close_initiated,
		 profile->original_dir.close_initiated);
	MLX5_SET(conn_track_aso, desg, original_direction_tcp_liberal_enabled,
		 profile->liberal_mode);
	MLX5_SET(conn_track_aso, desg, original_direction_tcp_data_unacked,
		 profile->original_dir.data_unacked);
	MLX5_SET(conn_track_aso, desg, original_direction_tcp_max_ack,
		 profile->original_dir.last_ack_seen);
	MLX5_SET(conn_track_aso, desg, last_win, profile->last_window);
	MLX5_SET(conn_track_aso, desg, last_dir, profile->last_direction);
	MLX5_SET(conn_track_aso, desg, last_index, profile->last_index);
	MLX5_SET(conn_track_aso, desg, last_seq, profile->last_seq);
	MLX5_SET(conn_track_aso, desg, last_ack, profile->last_ack);
	MLX5_SET(conn_track_aso, desg, last_end, profile->last_end);
	orig_dir = MLX5_ADDR_OF(conn_track_aso, desg, original_dir);
	MLX5_SET(tcp_window_params, orig_dir, sent_end,
		 profile->original_dir.sent_end);
	MLX5_SET(tcp_window_params, orig_dir, reply_end,
		 profile->original_dir.reply_end);
	MLX5_SET(tcp_window_params, orig_dir, max_win,
		 profile->original_dir.max_win);
	MLX5_SET(tcp_window_params, orig_dir, max_ack,
		 profile->original_dir.max_ack);
	reply_dir = MLX5_ADDR_OF(conn_track_aso, desg, reply_dir);
	MLX5_SET(tcp_window_params, reply_dir, sent_end,
		 profile->reply_dir.sent_end);
	MLX5_SET(tcp_window_params, reply_dir, reply_end,
		 profile->reply_dir.reply_end);
	MLX5_SET(tcp_window_params, reply_dir, max_win,
		 profile->reply_dir.max_win);
	MLX5_SET(tcp_window_params, reply_dir, max_ack,
		 profile->reply_dir.max_ack);
	sq->head++;
	sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
	mlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,
			   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],
			   !sh->tx_uar.dbnc);
	rte_spinlock_unlock(&sq->sqsl);
	return 1;
}

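/*
 * The modify WQE built above rewrites the full 64B CT context:
 * operand_masks selects BYTEWISE_64BYTE with always-true conditions and
 * data_mask is all ones, so every field set via MLX5_SET() in the data
 * segment is applied by the HW. The query path below instead posts a
 * read-only WQE with a zero data_mask.
 */
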
/**
 * Update the status field of CTs to indicate they are ready to be used by
 * flows. Handles a contiguous run of CTs completed since the last update.
 *
 * @param[in] sq
 *   Pointer to ASO CT SQ.
 * @param[in] num
 *   Number of CT structures to be updated.
 */
static void
mlx5_aso_ct_status_update(struct mlx5_aso_sq *sq, uint16_t num)
{
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t i;
	struct mlx5_aso_ct_action *ct = NULL;
	uint16_t idx;

	for (i = 0; i < num; i++) {
		idx = (uint16_t)((sq->tail + i) & mask);
		ct = sq->elts[idx].ct;
		MLX5_ASSERT(ct);
		MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_READY);
		if (sq->elts[idx].query_data)
			rte_memcpy(sq->elts[idx].query_data,
				   (char *)((uintptr_t)sq->mr.addr + idx * 64),
				   64);
	}
}

/**
 * Post a WQE to the ASO CT SQ to query the current context.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] ct
 *   Pointer to the generic CT structure related to the context.
 * @param[in] data
 *   Pointer to data area to be filled.
 *
 * @return
 *   1 on success (WQE number), 0 if the SQ is temporarily full, -1 on failure.
 */
static int
mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,
			    struct mlx5_aso_ct_action *ct, char *data)
{
	volatile struct mlx5_aso_wqe *wqe = NULL;
	struct mlx5_aso_sq *sq = &sh->ct_mng->aso_sq;
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t res;
	uint16_t wqe_idx;
	struct mlx5_aso_ct_pool *pool;
	enum mlx5_aso_ct_state state =
				__atomic_load_n(&ct->state, __ATOMIC_RELAXED);

	if (state == ASO_CONNTRACK_FREE) {
		DRV_LOG(ERR, "Fail: No context to query");
		return -1;
	} else if (state == ASO_CONNTRACK_WAIT) {
		return 0;
	}
	rte_spinlock_lock(&sq->sqsl);
	res = size - (uint16_t)(sq->head - sq->tail);
	if (unlikely(!res)) {
		rte_spinlock_unlock(&sq->sqsl);
		DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
		return 0;
	}
	MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_QUERY);
	wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
	/* Confirm the location and address of the prefetch instruction. */
	rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
	/* Fill next WQE. */
	wqe_idx = sq->head & mask;
	sq->elts[wqe_idx].ct = ct;
	sq->elts[wqe_idx].query_data = data;
	pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
	/* Each WQE will have a single CT object. */
	wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
						  ct->offset);
	wqe->general_cseg.opcode = rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |
			(ASO_OPC_MOD_CONNECTION_TRACKING <<
			 WQE_CSEG_OPC_MOD_OFFSET) |
			sq->pi << WQE_CSEG_WQE_INDEX_OFFSET);
	/*
	 * No write request is required.
	 * ASO_OPER_LOGICAL_AND and ASO_OP_ALWAYS_FALSE are both 0.
	 * "BYTEWISE_64BYTE" is needed for a whole context.
	 * Set to 0 directly to reduce an endian swap. (Modify should rewrite.)
	 * "data_mask" is ignored.
	 * Buffer address was already filled during initialization.
	 */
	wqe->aso_cseg.operand_masks = rte_cpu_to_be_32(BYTEWISE_64BYTE <<
					ASO_CSEG_DATA_MASK_MODE_OFFSET);
	wqe->aso_cseg.data_mask = 0;
	sq->head++;
	/*
	 * Each WQE contains 2 WQEBB's, even though
	 * the data segment is not used in this case.
	 */
	sq->pi += 2;
	mlx5_doorbell_ring(&sh->tx_uar.bf_db, *(volatile uint64_t *)wqe,
			   sq->pi, &sq->sq_obj.db_rec[MLX5_SND_DBR],
			   !sh->tx_uar.dbnc);
	rte_spinlock_unlock(&sq->sqsl);
	return 1;
}

/**
 * Handle completions from WQEs sent to ASO CT.
 *
 * @param[in] mng
 *   Pointer to the CT pools management structure.
 */
static void
mlx5_aso_ct_completion_handle(struct mlx5_aso_ct_pools_mng *mng)
{
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const uint32_t cq_size = 1 << cq->log_desc_n;
	const uint32_t mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx;
	uint16_t max;
	uint16_t n = 0;
	int ret;

	rte_spinlock_lock(&sq->sqsl);
	max = (uint16_t)(sq->head - sq->tail);
	if (unlikely(!max)) {
		rte_spinlock_unlock(&sq->sqsl);
		return;
	}
	next_idx = cq->cq_ci & mask;
	do {
		idx = next_idx;
		next_idx = (cq->cq_ci + 1) & mask;
		/* Need to confirm the position of the prefetch. */
		rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
		cqe = &cq->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_aso_cqe_err_handle(sq);
		} else {
			n++;
		}
		cq->cq_ci++;
	} while (1);
	if (likely(n)) {
		mlx5_aso_ct_status_update(sq, n);
		sq->tail += n;
		rte_io_wmb();
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	}
	rte_spinlock_unlock(&sq->sqsl);
}

/**
 * Update connection tracking ASO context by sending WQE.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 * @param[in] ct
 *   Pointer to connection tracking offload object.
 * @param[in] profile
 *   Pointer to connection tracking TCP parameter.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
			  struct mlx5_aso_ct_action *ct,
			  const struct rte_flow_action_conntrack *profile)
{
	uint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
	struct mlx5_aso_ct_pool *pool;

	MLX5_ASSERT(ct);
	do {
		mlx5_aso_ct_completion_handle(sh->ct_mng);
		if (mlx5_aso_ct_sq_enqueue_single(sh, ct, profile))
			return 0;
		/* Waiting for wqe resource. */
		rte_delay_us_sleep(10u);
	} while (--poll_wqe_times);
	pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
	DRV_LOG(ERR, "Fail to send WQE for ASO CT %d in pool %d",
		ct->offset, pool->index);
	return -1;
}

/**
 * The routine is used to wait for WQE completion to continue with queried data.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 * @param[in] ct
 *   Pointer to connection tracking offload object.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh,
		       struct mlx5_aso_ct_action *ct)
{
	struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
	uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
	struct mlx5_aso_ct_pool *pool;

	if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
	    ASO_CONNTRACK_READY)
		return 0;
	do {
		mlx5_aso_ct_completion_handle(mng);
		if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
		    ASO_CONNTRACK_READY)
			return 0;
		/* Waiting for CQE ready; consider whether to block or sleep. */
		rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
	} while (--poll_cqe_times);
	pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
	DRV_LOG(ERR, "Fail to poll CQE for ASO CT %d in pool %d",
		ct->offset, pool->index);
	return -1;
}

/**
 * Convert the hardware conntrack data format into the profile.
 *
 * @param[in] profile
 *   Pointer to conntrack profile to be filled after query.
 * @param[in] wdata
 *   Pointer to data fetched from hardware.
 */
static inline void
mlx5_aso_ct_obj_analyze(struct rte_flow_action_conntrack *profile,
			char *wdata)
{
	void *o_dir = MLX5_ADDR_OF(conn_track_aso, wdata, original_dir);
	void *r_dir = MLX5_ADDR_OF(conn_track_aso, wdata, reply_dir);

	/* MLX5_GET16 should be taken into consideration. */
	profile->state = (enum rte_flow_conntrack_state)
			 MLX5_GET(conn_track_aso, wdata, state);
	profile->enable = !MLX5_GET(conn_track_aso, wdata, freeze_track);
	profile->selective_ack = MLX5_GET(conn_track_aso, wdata,
					  sack_permitted);
	profile->live_connection = MLX5_GET(conn_track_aso, wdata,
					    connection_assured);
	profile->challenge_ack_passed = MLX5_GET(conn_track_aso, wdata,
						 challenged_acked);
	profile->max_ack_window = MLX5_GET(conn_track_aso, wdata,
					   max_ack_window);
	profile->retransmission_limit = MLX5_GET(conn_track_aso, wdata,
						 retranmission_limit);
	profile->last_window = MLX5_GET(conn_track_aso, wdata, last_win);
	profile->last_direction = MLX5_GET(conn_track_aso, wdata, last_dir);
	profile->last_index = (enum rte_flow_conntrack_tcp_last_index)
			      MLX5_GET(conn_track_aso, wdata, last_index);
	profile->last_seq = MLX5_GET(conn_track_aso, wdata, last_seq);
	profile->last_ack = MLX5_GET(conn_track_aso, wdata, last_ack);
	profile->last_end = MLX5_GET(conn_track_aso, wdata, last_end);
	profile->liberal_mode = MLX5_GET(conn_track_aso, wdata,
				reply_direction_tcp_liberal_enabled) |
				MLX5_GET(conn_track_aso, wdata,
				original_direction_tcp_liberal_enabled);
	/* No liberal in the RTE structure profile. */
	profile->reply_dir.scale = MLX5_GET(conn_track_aso, wdata,
					    reply_direction_tcp_scale);
	profile->reply_dir.close_initiated = MLX5_GET(conn_track_aso, wdata,
					reply_direction_tcp_close_initiated);
	profile->reply_dir.data_unacked = MLX5_GET(conn_track_aso, wdata,
					reply_direction_tcp_data_unacked);
	profile->reply_dir.last_ack_seen = MLX5_GET(conn_track_aso, wdata,
					reply_direction_tcp_max_ack);
	profile->reply_dir.sent_end = MLX5_GET(tcp_window_params,
					       r_dir, sent_end);
	profile->reply_dir.reply_end = MLX5_GET(tcp_window_params,
						r_dir, reply_end);
	profile->reply_dir.max_win = MLX5_GET(tcp_window_params,
					      r_dir, max_win);
	profile->reply_dir.max_ack = MLX5_GET(tcp_window_params,
					      r_dir, max_ack);
	profile->original_dir.scale = MLX5_GET(conn_track_aso, wdata,
					       original_direction_tcp_scale);
	profile->original_dir.close_initiated = MLX5_GET(conn_track_aso, wdata,
					original_direction_tcp_close_initiated);
	profile->original_dir.data_unacked = MLX5_GET(conn_track_aso, wdata,
					original_direction_tcp_data_unacked);
	profile->original_dir.last_ack_seen = MLX5_GET(conn_track_aso, wdata,
					original_direction_tcp_max_ack);
	profile->original_dir.sent_end = MLX5_GET(tcp_window_params,
						  o_dir, sent_end);
	profile->original_dir.reply_end = MLX5_GET(tcp_window_params,
						   o_dir, reply_end);
	profile->original_dir.max_win = MLX5_GET(tcp_window_params,
						 o_dir, max_win);
	profile->original_dir.max_ack = MLX5_GET(tcp_window_params,
						 o_dir, max_ack);
}

/**
 * Query connection tracking information parameter by send WQE.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] ct
 *   Pointer to connection tracking offload object.
 * @param[out] profile
 *   Pointer to connection tracking TCP information.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,
			 struct mlx5_aso_ct_action *ct,
			 struct rte_flow_action_conntrack *profile)
{
	uint32_t poll_wqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
	struct mlx5_aso_ct_pool *pool;
	char out_data[64 * 2];
	int ret;

	MLX5_ASSERT(ct);
	do {
		mlx5_aso_ct_completion_handle(sh->ct_mng);
		ret = mlx5_aso_ct_sq_query_single(sh, ct, out_data);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			goto data_handle;
		/* Waiting for wqe resource or state. */
		else
			rte_delay_us_sleep(10u);
	} while (--poll_wqe_times);
	pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
	DRV_LOG(ERR, "Fail to send WQE for ASO CT %d in pool %d",
		ct->offset, pool->index);
	return -1;
data_handle:
	ret = mlx5_aso_ct_wait_ready(sh, ct);
	if (!ret)
		mlx5_aso_ct_obj_analyze(profile, out_data);
	return ret;
}

/**
 * Make sure the conntrack context is synchronized with hardware before
 * creating a flow rule that uses it.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] ct
 *   Pointer to connection tracking offload object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,
		      struct mlx5_aso_ct_action *ct)
{
	struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
	uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
	enum mlx5_aso_ct_state state =
				__atomic_load_n(&ct->state, __ATOMIC_RELAXED);

	if (state == ASO_CONNTRACK_FREE) {
		rte_errno = ENXIO;
		return -rte_errno;
	} else if (state == ASO_CONNTRACK_READY ||
		   state == ASO_CONNTRACK_QUERY) {
		return 0;
	}
	do {
		mlx5_aso_ct_completion_handle(mng);
		state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
		if (state == ASO_CONNTRACK_READY ||
		    state == ASO_CONNTRACK_QUERY)
			return 0;
		/* Waiting for CQE ready; consider whether to block or sleep. */
		rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
	} while (--poll_cqe_times);
	rte_errno = EBUSY;
	return -rte_errno;
}