/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_eal_paging.h>

#include <mlx5_malloc.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5_flow.h"
 * Destroy Completion Queue used for ASO access.
mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
	mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
 * Create Completion Queue used for ASO access.
 *   Context returned from mlx5 open_device() glue function.
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *   Socket to use for allocation.
 * @param[in] uar_page_id
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
		   int socket, int uar_page_id)
	struct mlx5_devx_cq_attr attr = {
		.uar_page_id = uar_page_id,
	cq->log_desc_n = log_desc_n;
	return mlx5_devx_cq_create(ctx, &cq->cq_obj, log_desc_n, &attr, socket);
mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
	claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
	if (!mr->is_indirect && mr->umem)
		claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
	memset(mr, 0, sizeof(*mr));
 * Register Memory Region.
 *   Context returned from mlx5 open_device() glue function.
 *   Pointer to MR to create.
 *   Socket to use for allocation.
 *   Protection Domain number to use.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
	struct mlx5_devx_mkey_attr mkey_attr;
	mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
		DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx.");
	mr->umem = mlx5_os_umem_reg(ctx, mr->buf, length,
				    IBV_ACCESS_LOCAL_WRITE);
		DRV_LOG(ERR, "Failed to register Umem for MR by Devx.");
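	/* Create a direct MKey over the registered UMEM so the device can
	 * DMA ASO data into this buffer.
	 */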
	mkey_attr.addr = (uintptr_t)mr->buf;
	mkey_attr.size = length;
	mkey_attr.umem_id = mlx5_os_get_umem_id(mr->umem);
	mkey_attr.pg_access = 1;
	mkey_attr.klm_array = NULL;
	mkey_attr.klm_num = 0;
	mkey_attr.relaxed_ordering_read = 0;
	mkey_attr.relaxed_ordering_write = 0;
	mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
		DRV_LOG(ERR, "Failed to create direct Mkey.");
	mr->is_indirect = false;
	claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
 * Destroy Send Queue used for ASO access.
mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
	mlx5_devx_sq_destroy(&sq->sq_obj);
	mlx5_aso_cq_destroy(&sq->cq);
	memset(sq, 0, sizeof(*sq));
 * Initialize Send Queue used for ASO access.
 *   ASO SQ to initialize.
mlx5_aso_age_init_sq(struct mlx5_aso_sq *sq)
	volatile struct mlx5_aso_wqe *restrict wqe;
	int size = 1 << sq->log_desc_n;
	/* All the fields set below stay constant for the lifetime of the SQ. */
	for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
							   (sizeof(*wqe) >> 4));
		wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
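		/* Each WQE monitors one pool: point its VA at that pool's
		 * slice of hit bits (one bit per action, in 64-bit words).
		 */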
		addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
				  MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
		wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
		wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
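		/* Unconditional ASO access: both conditions are always true
		 * and the byte-wise data mask covers the whole 64-byte line.
		 */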
		wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
			 (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
			 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
		wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
 * Initialize Send Queue used for ASO flow meter access.
 *   ASO SQ to initialize.
mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq)
	volatile struct mlx5_aso_wqe *restrict wqe;
	int size = 1 << sq->log_desc_n;
	/* All the fields set below stay constant for the lifetime of the SQ. */
	for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
							   (sizeof(*wqe) >> 4));
		wqe->aso_cseg.operand_masks = RTE_BE32(0u |
			(ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
			(ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
			(ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
			(BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
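		/* Request a CQE for every meter-modify WQE so callers can
		 * poll for its completion.
		 */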
		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
						   MLX5_COMP_MODE_OFFSET);
 * Create Send Queue used for ASO access.
 *   Context returned from mlx5 open_device() glue function.
 *   Pointer to SQ to create.
 *   Socket to use for allocation.
 *   User Access Region object.
 *   Protection Domain number to use.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
		   void *uar, uint32_t pdn, uint16_t log_desc_n,
	struct mlx5_devx_create_sq_attr attr = {
		.user_index = 0xFFFF,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.uar_page = mlx5_os_get_devx_uar_page_id(uar),
		.ts_format = mlx5_ts_format_conv(ts_format),
	struct mlx5_devx_modify_sq_attr modify_attr = {
		.state = MLX5_SQC_STATE_RDY,
	if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
			       mlx5_os_get_devx_uar_page_id(uar)))
	sq->log_desc_n = log_desc_n;
	attr.cqn = sq->cq.cq_obj.cq->id;
	/* An mlx5_aso_wqe is twice the size of an mlx5_wqe, so double the WQEBB count. */
	log_wqbb_n = log_desc_n + 1;
	ret = mlx5_devx_sq_create(ctx, &sq->sq_obj, log_wqbb_n, &attr, socket);
		DRV_LOG(ERR, "Can't create SQ object.");
	ret = mlx5_devx_cmd_modify_sq(sq->sq_obj.sq, &modify_attr);
		DRV_LOG(ERR, "Can't change SQ state to ready.");
	sq->sqn = sq->sq_obj.sq->id;
	sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
	rte_spinlock_init(&sq->sqsl);
	mlx5_aso_destroy_sq(sq);
 * API to create and initialize Send Queue used for ASO access.
 *   Pointer to shared device context.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
		    enum mlx5_access_aso_opc_mod aso_opc_mod)
	uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;
	switch (aso_opc_mod) {
	case ASO_OPC_MOD_FLOW_HIT:
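		/* MR size: one hit bit per age action, one pool per SQ
		 * descriptor.
		 */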
		if (mlx5_aso_devx_reg_mr(sh->ctx,
				(MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
				sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0, sh->pdn))
		if (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
				sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
			mlx5_aso_devx_dereg_mr(&sh->aso_age_mng->aso_sq.mr);
		mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
	case ASO_OPC_MOD_POLICER:
		if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->pools_mng.sq, 0,
				sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
		mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
		DRV_LOG(ERR, "Unknown ASO operation mode");
 * API to destroy Send Queue used for ASO access.
 *   Pointer to shared device context.
mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
		      enum mlx5_access_aso_opc_mod aso_opc_mod)
	struct mlx5_aso_sq *sq;
	switch (aso_opc_mod) {
	case ASO_OPC_MOD_FLOW_HIT:
		mlx5_aso_devx_dereg_mr(&sh->aso_age_mng->aso_sq.mr);
		sq = &sh->aso_age_mng->aso_sq;
	case ASO_OPC_MOD_POLICER:
		sq = &sh->mtrmng->pools_mng.sq;
		DRV_LOG(ERR, "Unknown ASO operation mode");
	mlx5_aso_destroy_sq(sq);
 * Write a burst of WQEs to ASO SQ.
 *   ASO management data, contains the SQ.
 *   Index of the last valid pool.
 *   Number of WQEs in burst.
mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
	volatile struct mlx5_aso_wqe *wqe;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_age_pool *pool;
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t start_head = sq->head;
	max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
	sq->elts[start_head & mask].burst_size = max;
		wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
		rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
		rte_spinlock_lock(&mng->resize_sl);
		pool = mng->pools[sq->next];
		rte_spinlock_unlock(&mng->resize_sl);
		sq->elts[sq->head & mask].pool = pool;
		wqe->general_cseg.misc =
			rte_cpu_to_be_32(((struct mlx5_devx_obj *)
					 (pool->flow_hit_aso_obj))->id);
		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
						   MLX5_COMP_MODE_OFFSET);
		wqe->general_cseg.opcode = rte_cpu_to_be_32
						(MLX5_OPCODE_ACCESS_ASO |
						 (ASO_OPC_MOD_FLOW_HIT <<
						  WQE_CSEG_OPC_MOD_OFFSET) |
						  WQE_CSEG_WQE_INDEX_OFFSET));
		sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
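	/* Only the last WQE of the burst requests a completion; the earlier
	 * ones report errors only (MLX5_COMP_ONLY_FIRST_ERR).
	 */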
	wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					   MLX5_COMP_MODE_OFFSET);
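	/* Update the doorbell record, then ring the doorbell by copying the
	 * start of the last control segment to the UAR register.
	 */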
	sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
	*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
	return sq->elts[start_head & mask].burst_size;
 * Debug utility function. Dump contents of error CQE and WQE.
mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
	DRV_LOG(ERR, "Error cqe:");
	for (i = 0; i < 16; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
			cqe[i + 2], cqe[i + 3]);
	DRV_LOG(ERR, "\nError wqe:");
	for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
			wqe[i + 2], wqe[i + 3]);
 * Handle case of error CQE.
mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
	struct mlx5_aso_cq *cq = &sq->cq;
	uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
	volatile struct mlx5_err_cqe *cqe =
		(volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx];
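	/* Recover the index of the failing WQE from the CQE's wqe_counter. */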
	idx = rte_be_to_cpu_16(cqe->wqe_counter) & ((1u << sq->log_desc_n) - 1);
	mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
			       (volatile uint32_t *)&sq->sq_obj.aso_wqes[idx]);
 * Update ASO objects upon completion.
 *   Shared device context.
 *   Number of completed ASO objects.
mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_age_info *age_info;
	const uint16_t size = 1 << sq->log_desc_n;
	const uint16_t mask = size - 1;
	const uint64_t curr = MLX5_CURR_TIME_SEC;
	uint16_t expected = AGE_CANDIDATE;
	for (i = 0; i < n; ++i) {
		uint16_t idx = (sq->tail + i) & mask;
		struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
		uint64_t diff = curr - pool->time_of_last_age_check;
		uint64_t *addr = sq->mr.buf;
		addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
		pool->time_of_last_age_check = curr;
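		/* Scan the pool's hit bitmap: one bit per age action, set by
		 * the device when the flow was hit since the last poll.
		 */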
		for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
			struct mlx5_aso_age_action *act = &pool->actions[j];
			struct mlx5_age_param *ap = &act->age_params;
			if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
			u8addr = (uint8_t *)addr;
			hit = (u8addr[byte] >> offset) & 0x1;
				__atomic_store_n(&ap->sec_since_last_hit, 0,
				struct mlx5_priv *priv;
				__atomic_fetch_add(&ap->sec_since_last_hit,
						   diff, __ATOMIC_RELAXED);
				/* If timeout passed add to aged-out list. */
				if (ap->sec_since_last_hit <= ap->timeout)
				rte_eth_devices[ap->port_id].data->dev_private;
				age_info = GET_PORT_AGE_INFO(priv);
				rte_spinlock_lock(&age_info->aged_sl);
				if (__atomic_compare_exchange_n(&ap->state,
					LIST_INSERT_HEAD(&age_info->aged_aso,
					MLX5_AGE_SET(age_info,
				rte_spinlock_unlock(&age_info->aged_sl);
	mlx5_age_event_prepare(sh);
 * Handle completions from WQEs sent to ASO SQ.
 *   Shared device context.
 *   Number of CQEs handled.
mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int mask = cq_size - 1;
	uint32_t next_idx = cq->cq_ci & mask;
	const uint16_t max = (uint16_t)(sq->head - sq->tail);
		next_idx = (cq->cq_ci + 1) & mask;
		rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
		cqe = &cq->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		 * Be sure the owner read is done before any other cookie or
		 * opaque field is read.
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
			mlx5_aso_cqe_err_handle(sq);
			i += sq->elts[(sq->tail + i) & mask].burst_size;
		mlx5_aso_age_action_update(sh, i);
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
 * Periodically read CQEs and send WQEs to ASO SQ.
 *   Shared device context containing the ASO SQ.
mlx5_flow_aso_alarm(void *arg)
	struct mlx5_dev_ctx_shared *sh = arg;
	struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
	rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
	n = sh->aso_age_mng->next;
	rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
	mlx5_aso_completion_handle(sh);
		/* End of loop: wait 1 second. */
	mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
	if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
		DRV_LOG(ERR, "Cannot reinitialize aso alarm.");
 * API to start ASO access using ASO SQ.
 *   Pointer to shared device context.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh)
	if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
		DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
 * API to stop ASO access using ASO SQ.
 *   Pointer to shared device context.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh)
	if (!sh->aso_age_mng->aso_sq.sq_obj.sq)
		rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
		if (rte_errno != EINPROGRESS)
mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
			       struct mlx5_aso_mtr *aso_mtr)
	volatile struct mlx5_aso_wqe *wqe = NULL;
	struct mlx5_flow_meter_info *fm = NULL;
	struct mlx5_flow_meter_profile *fmp;
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint32_t dseg_idx = 0;
	struct mlx5_aso_mtr_pool *pool = NULL;
	rte_spinlock_lock(&sq->sqsl);
	res = size - (uint16_t)(sq->head - sq->tail);
	if (unlikely(!res)) {
		DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
		rte_spinlock_unlock(&sq->sqsl);
	wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
	rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
	sq->elts[sq->head & mask].mtr = aso_mtr;
	pool = container_of(aso_mtr, struct mlx5_aso_mtr_pool,
			    mtrs[aso_mtr->offset]);
	wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
						  (aso_mtr->offset >> 1));
	wqe->general_cseg.opcode = rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |
						    (ASO_OPC_MOD_POLICER <<
						     WQE_CSEG_OPC_MOD_OFFSET) |
						    sq->pi << WQE_CSEG_WQE_INDEX_OFFSET);
	/* There are 2 meters in one ASO cache line. */
	dseg_idx = aso_mtr->offset & 0x1;
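	/* Limit the data mask to the half of the 64-byte line that holds
	 * this meter's parameters.
	 */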
	wqe->aso_cseg.data_mask =
		RTE_BE64(MLX5_IFC_FLOW_METER_PARAM_MASK << (32 * !dseg_idx));
		wqe->aso_dseg.mtrs[dseg_idx].cbs_cir =
			fm->profile->srtcm_prm.cbs_cir;
		wqe->aso_dseg.mtrs[dseg_idx].ebs_eir =
			fm->profile->srtcm_prm.ebs_eir;
		wqe->aso_dseg.mtrs[dseg_idx].cbs_cir =
			RTE_BE32(MLX5_IFC_FLOW_METER_DISABLE_CBS_CIR_VAL);
		wqe->aso_dseg.mtrs[dseg_idx].ebs_eir = 0;
	if (fmp->profile.packet_mode)
		wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
			RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
				 (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET) |
				 (MLX5_METER_MODE_PKT << ASO_DSEG_MTR_MODE));
		wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
			RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
				 (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
	sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
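	/* Update the doorbell record and ring the doorbell via the UAR. */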
	sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
	*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
	rte_spinlock_unlock(&sq->sqsl);
mlx5_aso_mtrs_status_update(struct mlx5_aso_sq *sq, uint16_t aso_mtrs_nums)
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	struct mlx5_aso_mtr *aso_mtr = NULL;
	uint8_t exp_state = ASO_METER_WAIT;
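	/* Mark each completed meter READY if it is still in the WAIT state. */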
	for (i = 0; i < aso_mtrs_nums; ++i) {
		aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
		MLX5_ASSERT(aso_mtr);
		(void)__atomic_compare_exchange_n(&aso_mtr->state,
				&exp_state, ASO_METER_READY,
				false, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int mask = cq_size - 1;
	uint32_t next_idx = cq->cq_ci & mask;
	rte_spinlock_lock(&sq->sqsl);
	max = (uint16_t)(sq->head - sq->tail);
	if (unlikely(!max)) {
		rte_spinlock_unlock(&sq->sqsl);
		next_idx = (cq->cq_ci + 1) & mask;
		rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
		cqe = &cq->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		 * Be sure the owner read is done before any other cookie or
		 * opaque field is read.
		if (ret != MLX5_CQE_STATUS_SW_OWN) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
			mlx5_aso_cqe_err_handle(sq);
		mlx5_aso_mtrs_status_update(sq, n);
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	rte_spinlock_unlock(&sq->sqsl);
 * Update meter parameters by sending an ASO WQE.
 *   Pointer to shared device context.
 *   Pointer to ASO meter to be modified.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
			     struct mlx5_aso_mtr *mtr)
	struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
	uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
		mlx5_aso_mtr_completion_handle(sq);
		if (mlx5_aso_mtr_sq_enqueue_single(sq, mtr))
		/* Wait for a WQE resource to free up. */
		rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
	} while (--poll_wqe_times);
	DRV_LOG(ERR, "Failed to send WQE for ASO meter offset %d",
 * Wait for the ASO meter to become ready.
 *   Pointer to shared device context.
 *   Pointer to ASO meter to wait on.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,
		  struct mlx5_aso_mtr *mtr)
	struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
	uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
	if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
		mlx5_aso_mtr_completion_handle(sq);
		if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
		/* Wait for the CQE to be ready. */
		rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
	} while (--poll_cqe_times);
	DRV_LOG(ERR, "Failed to poll CQE ready for ASO meter offset %d",