/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_eal_paging.h>

#include <mlx5_malloc.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5.h"
#include "mlx5_flow.h"
/**
 * Destroy Completion Queue used for ASO access.
 *
 * @param[in] cq
 *   ASO CQ to destroy.
 */
static void
mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
{
	if (cq->cq_obj.cq)
		mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
}
/**
 * Create Completion Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] cq
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar_page_id
 *   UAR page ID to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
		   int socket, int uar_page_id)
{
	struct mlx5_devx_cq_attr attr = {
		.uar_page_id = uar_page_id,
	};

	cq->log_desc_n = log_desc_n;
	cq->cq_ci = 0;
	return mlx5_devx_cq_create(ctx, &cq->cq_obj, log_desc_n, &attr, socket);
}
/**
 * Free MR resources.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] mr
 *   MR to free.
 */
static void
mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr)
{
	void *addr = mr->addr;

	sh->share_cache.dereg_mr_cb(mr);
	mlx5_free(addr);
	memset(mr, 0, sizeof(*mr));
}
/**
 * Register Memory Region.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] length
 *   Size of MR buffer.
 * @param[in/out] mr
 *   Pointer to MR to create.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,
		struct mlx5_pmd_mr *mr, int socket)
{
	int ret;

	mr->addr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
			       socket);
	if (!mr->addr) {
		DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
		return -1;
	}
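	/*
	 * Register the buffer with the device so that ASO WQEs can
	 * reference it through the returned lkey.
	 */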
	ret = sh->share_cache.reg_mr_cb(sh->pd, mr->addr, length, mr);
	if (ret) {
		DRV_LOG(ERR, "Failed to create direct Mkey.");
		mlx5_free(mr->addr);
		return -1;
	}
	return 0;
}

/**
 * Destroy Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to destroy.
 */
static void
mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
{
	mlx5_devx_sq_destroy(&sq->sq_obj);
	mlx5_aso_cq_destroy(&sq->cq);
	memset(sq, 0, sizeof(*sq));
}
/**
 * Initialize Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_age_init_sq(struct mlx5_aso_sq *sq)
{
	volatile struct mlx5_aso_wqe *restrict wqe;
	int i;
	int size = 1 << sq->log_desc_n;
	uint64_t addr;

	/* These WQE fields are constant across requests and are set only once. */
	for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
							   (sizeof(*wqe) >> 4));
		wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.lkey);
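		/*
		 * Each WQE queries one age pool:
		 * MLX5_ASO_AGE_ACTIONS_PER_POOL hit bits, i.e.
		 * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64 64-bit words inside
		 * the registered buffer.
		 */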
		addr = (uint64_t)((uint64_t *)sq->mr.addr + i *
				  MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
		wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
		wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
		wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
			(0u |
			 (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
			 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
		wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
	}
}
/**
 * Initialize Send Queue used for ASO flow meter access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq)
{
	volatile struct mlx5_aso_wqe *restrict wqe;
	int i;
	int size = 1 << sq->log_desc_n;

	/* These WQE fields are constant across requests and are set only once. */
	for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
							   (sizeof(*wqe) >> 4));
		wqe->aso_cseg.operand_masks = RTE_BE32(0u |
			(ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
			(ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
			(ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
			(BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
						   MLX5_COMP_MODE_OFFSET);
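		/*
		 * The per-meter fields (object id, data mask and meter
		 * parameters) are filled in at enqueue time.
		 */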
	}
}

/**
 * Initialize Send Queue used for ASO connection tracking.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_ct_init_sq(struct mlx5_aso_sq *sq)
{
	volatile struct mlx5_aso_wqe *restrict wqe;
	int i;
	int size = 1 << sq->log_desc_n;
	uint64_t addr;

	/* These WQE fields are constant across requests and are set only once. */
	for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
							   (sizeof(*wqe) >> 4));
		/* One unique MR for the query data. */
		wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.lkey);
		/* Each ASO CT object occupies 64 bytes of the query buffer. */
		addr = (uint64_t)((uintptr_t)sq->mr.addr + i * 64);
		wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
		wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
		/*
		 * The values of operand_masks differ between modify and
		 * query, so they are set per request.
		 * The data_mask may also differ for each modification; for
		 * a query it can be zero and is ignored.
		 * CQE generation is always requested, so the caller can
		 * decide when it is safe to create the flow or read the data.
		 */
		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
						   MLX5_COMP_MODE_OFFSET);
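		/*
		 * The remaining request-specific fields are written when a
		 * CT modify or query WQE is posted.
		 */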
	}
}

/**
 * Create Send Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] sq
 *   Pointer to SQ to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar
 *   User Access Region object.
 * @param[in] pdn
 *   Protection Domain number to use.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] ts_format
 *   Timestamp format supported by the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket, void *uar,
		   uint32_t pdn, uint16_t log_desc_n, uint32_t ts_format)
{
	struct mlx5_devx_create_sq_attr attr = {
		.user_index = 0xFFFF,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = pdn,
			.uar_page = mlx5_os_get_devx_uar_page_id(uar),
		},
		.ts_format = mlx5_ts_format_conv(ts_format),
	};
	struct mlx5_devx_modify_sq_attr modify_attr = {
		.state = MLX5_SQC_STATE_RDY,
	};
	uint16_t log_wqbb_n;
	int ret;

	if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
			       mlx5_os_get_devx_uar_page_id(uar)))
		return -1;
	sq->log_desc_n = log_desc_n;
	attr.cqn = sq->cq.cq_obj.cq->id;
	/*
	 * An mlx5_aso_wqe is twice the size of a regular mlx5_wqe,
	 * so every descriptor occupies two WQEBBs.
	 */
	log_wqbb_n = log_desc_n + 1;
	ret = mlx5_devx_sq_create(ctx, &sq->sq_obj, log_wqbb_n, &attr, socket);
	if (ret) {
		DRV_LOG(ERR, "Can't create SQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	ret = mlx5_devx_cmd_modify_sq(sq->sq_obj.sq, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change SQ state to ready.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->pi = 0;
	sq->head = 0;
	sq->tail = 0;
	sq->sqn = sq->sq_obj.sq->id;
	sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
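	/* The sqsl spinlock serializes WQE posting and completion polling. */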
	rte_spinlock_init(&sq->sqsl);
	return 0;
error:
	mlx5_aso_destroy_sq(sq);
	return -1;
}

/**
 * API to create and initialize Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] aso_opc_mod
 *   Mode of ASO feature.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
		    enum mlx5_access_aso_opc_mod aso_opc_mod)
{
	uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;

	switch (aso_opc_mod) {
	case ASO_OPC_MOD_FLOW_HIT:
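		/*
		 * One WQE queries a whole age pool:
		 * MLX5_ASO_AGE_ACTIONS_PER_POOL hit bits, i.e.
		 * MLX5_ASO_AGE_ACTIONS_PER_POOL / 8 bytes per descriptor.
		 */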
		if (mlx5_aso_reg_mr(sh, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
				    sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
			return -1;
		if (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
				       sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
				       sh->sq_ts_format)) {
			mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
			return -1;
		}
		mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
		return 0;
	case ASO_OPC_MOD_POLICER:
		if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->pools_mng.sq, 0,
				       sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
				       sh->sq_ts_format))
			return -1;
		mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
		return 0;
	case ASO_OPC_MOD_CONNECTION_TRACKING:
		/* 64B per object for query. */
		if (mlx5_aso_reg_mr(sh, 64 * sq_desc_n,
				    &sh->ct_mng->aso_sq.mr, 0))
			return -1;
		if (mlx5_aso_sq_create(sh->ctx, &sh->ct_mng->aso_sq, 0,
				       sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
				       sh->sq_ts_format)) {
			mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
			return -1;
		}
		mlx5_aso_ct_init_sq(&sh->ct_mng->aso_sq);
		return 0;
	default:
		DRV_LOG(ERR, "Unknown ASO operation mode");
		break;
	}
	return -1;
}

/**
 * API to destroy Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] aso_opc_mod
 *   Mode of ASO feature.
 */
void
mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
		      enum mlx5_access_aso_opc_mod aso_opc_mod)
{
	struct mlx5_aso_sq *sq;

	switch (aso_opc_mod) {
	case ASO_OPC_MOD_FLOW_HIT:
		mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
		sq = &sh->aso_age_mng->aso_sq;
		break;
	case ASO_OPC_MOD_POLICER:
		sq = &sh->mtrmng->pools_mng.sq;
		break;
	default:
		DRV_LOG(ERR, "Unknown ASO operation mode");
		return;
	}
	mlx5_aso_destroy_sq(sq);
}
/**
 * Write a burst of WQEs to ASO SQ.
 *
 * @param[in] mng
 *   ASO management data, contains the SQ.
 * @param[in] n
 *   Index of the last valid pool.
 *
 * @return
 *   Number of WQEs in burst.
 */
static uint16_t
mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
{
	volatile struct mlx5_aso_wqe *wqe;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_age_pool *pool;
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t max;
	uint16_t start_head = sq->head;

	max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
	if (unlikely(!max))
		return 0;
	sq->elts[start_head & mask].burst_size = max;
	do {
		wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
		rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
		/* Fill next WQE. */
		rte_spinlock_lock(&mng->resize_sl);
		pool = mng->pools[sq->next];
		rte_spinlock_unlock(&mng->resize_sl);
		sq->elts[sq->head & mask].pool = pool;
		wqe->general_cseg.misc =
			rte_cpu_to_be_32(((struct mlx5_devx_obj *)
					 (pool->flow_hit_aso_obj))->id);
		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
						   MLX5_COMP_MODE_OFFSET);
		wqe->general_cseg.opcode = rte_cpu_to_be_32
						(MLX5_OPCODE_ACCESS_ASO |
						 (ASO_OPC_MOD_FLOW_HIT <<
						  WQE_CSEG_OPC_MOD_OFFSET) |
						 (sq->pi <<
						  WQE_CSEG_WQE_INDEX_OFFSET));
		sq->pi += 2; /* Each WQE contains 2 WQEBBs. */
		sq->head++;
		sq->next++;
		max--;
	} while (max);
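	/*
	 * Only the last WQE of the burst requests an unconditional
	 * completion; the previous ones report errors only, so a single
	 * CQE acknowledges the whole burst.
	 */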
	wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					   MLX5_COMP_MODE_OFFSET);
	rte_io_wmb();
	sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
	rte_wmb();
	*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
	rte_wmb();
	return sq->elts[start_head & mask].burst_size;
}

/**
 * Debug utility function. Dump contents of error CQE and WQE.
 *
 * @param[in] cqe
 *   Error CQE to dump.
 * @param[in] wqe
 *   Error WQE to dump.
 */
static void
mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
{
	int i;

	DRV_LOG(ERR, "Error cqe:");
	for (i = 0; i < 16; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
			cqe[i + 2], cqe[i + 3]);
	DRV_LOG(ERR, "\nError wqe:");
	for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
			wqe[i + 2], wqe[i + 3]);
}
/**
 * Handle case of error CQE.
 *
 * @param[in] sq
 *   ASO SQ to use.
 */
static void
mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
{
	struct mlx5_aso_cq *cq = &sq->cq;
	uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
	volatile struct mlx5_err_cqe *cqe =
			(volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx];

	cq->errors++;
	/* Mask the reported WQE counter into the SQ descriptor range. */
	idx = rte_be_to_cpu_16(cqe->wqe_counter) & ((1u << sq->log_desc_n) - 1);
	mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
			       (volatile uint32_t *)&sq->sq_obj.aso_wqes[idx]);
}
/**
 * Update ASO objects upon completion.
 *
 * @param[in] sh
 *   Shared device context.
 * @param[in] n
 *   Number of completed ASO objects.
 */
static void
mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_age_info *age_info;
	const uint16_t size = 1 << sq->log_desc_n;
	const uint16_t mask = size - 1;
	const uint64_t curr = MLX5_CURR_TIME_SEC;
	uint16_t expected = AGE_CANDIDATE;
	uint16_t i;

	for (i = 0; i < n; ++i) {
		uint16_t idx = (sq->tail + i) & mask;
		struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
		uint64_t diff = curr - pool->time_of_last_age_check;
		uint64_t *addr = sq->mr.addr;
		int j;

		addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
		pool->time_of_last_age_check = curr;
		for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
			struct mlx5_aso_age_action *act = &pool->actions[j];
			struct mlx5_age_param *ap = &act->age_params;
			uint16_t byte;
			uint8_t offset;
			uint8_t *u8addr;
			uint8_t hit;

			if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
					    AGE_CANDIDATE)
				continue;
			byte = 63 - (j / 8);
			offset = j % 8;
			u8addr = (uint8_t *)addr;
			hit = (u8addr[byte] >> offset) & 0x1;
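			/*
			 * A set hit bit means the rule matched traffic since
			 * the last query: reset the idle time. Otherwise
			 * accumulate the elapsed seconds and age the rule
			 * out once its timeout is exceeded.
			 */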
			if (hit) {
				__atomic_store_n(&ap->sec_since_last_hit, 0,
						 __ATOMIC_RELAXED);
			} else {
				struct mlx5_priv *priv;

				__atomic_fetch_add(&ap->sec_since_last_hit,
						   diff, __ATOMIC_RELAXED);
				/* If timeout passed add to aged-out list. */
				if (ap->sec_since_last_hit <= ap->timeout)
					continue;
				priv =
				rte_eth_devices[ap->port_id].data->dev_private;
				age_info = GET_PORT_AGE_INFO(priv);
				rte_spinlock_lock(&age_info->aged_sl);
				if (__atomic_compare_exchange_n(&ap->state,
								&expected,
								AGE_TMOUT,
								false,
							       __ATOMIC_RELAXED,
							    __ATOMIC_RELAXED)) {
					LIST_INSERT_HEAD(&age_info->aged_aso,
							 act, next);
					MLX5_AGE_SET(age_info,
						     MLX5_AGE_EVENT_NEW);
				}
				rte_spinlock_unlock(&age_info->aged_sl);
			}
		}
	}
	mlx5_age_event_prepare(sh);
}
/**
 * Handle completions from WQEs sent to ASO SQ.
 *
 * @param[in] sh
 *   Shared device context.
 *
 * @return
 *   Number of CQEs handled.
 */
static uint16_t
mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = cq->cq_ci & mask;
	const uint16_t max = (uint16_t)(sq->head - sq->tail);
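	/*
	 * At most 'max' completions can be pending: one CQE per burst
	 * posted by the aging alarm handler.
	 */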
	uint16_t i = 0;
	int ret;

	if (unlikely(!max))
		return 0;
	do {
		idx = next_idx;
		next_idx = (cq->cq_ci + 1) & mask;
		rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
		cqe = &cq->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_aso_cqe_err_handle(sq);
		} else {
			i += sq->elts[(sq->tail + i) & mask].burst_size;
		}
		cq->cq_ci++;
	} while (1);
	if (likely(i)) {
		mlx5_aso_age_action_update(sh, i);
		sq->tail += i;
		rte_io_wmb();
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	}
	return i;
}
/**
 * Periodically read CQEs and send WQEs to ASO SQ.
 *
 * @param[in] arg
 *   Shared device context containing the ASO SQ.
 */
static void
mlx5_flow_aso_alarm(void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;
	struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
	uint32_t us = 100u;
	uint16_t n;

	rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
	n = sh->aso_age_mng->next;
	rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
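	/*
	 * 'n' is the current number of age pools. Completions are polled
	 * first; once all pools have been queried the scan restarts from
	 * pool 0 and the alarm slows down to one second.
	 */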
	mlx5_aso_completion_handle(sh);
	if (sq->next == n) {
		/* End of loop: wait 1 second. */
		us = US_PER_S;
		sq->next = 0;
	}
	mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
	if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
		DRV_LOG(ERR, "Cannot reinitialize aso alarm.");
}

/**
 * API to start ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh)
{
	if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
		DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
		return -rte_errno;
	}
	return 0;
}

/**
 * API to stop ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh)
{
	int retries = 1024;

	if (!sh->aso_age_mng->aso_sq.sq_obj.sq)
		return -EINVAL;
	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}
	return -rte_errno;
}
static uint16_t
mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
			       struct mlx5_aso_mtr *aso_mtr)
{
	volatile struct mlx5_aso_wqe *wqe = NULL;
	struct mlx5_flow_meter_info *fm = NULL;
	struct mlx5_flow_meter_profile *fmp;
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t res;
	uint32_t dseg_idx = 0;
	struct mlx5_aso_mtr_pool *pool = NULL;

	rte_spinlock_lock(&sq->sqsl);
	res = size - (uint16_t)(sq->head - sq->tail);
	if (unlikely(!res)) {
		DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
		rte_spinlock_unlock(&sq->sqsl);
		return 0;
	}
	wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
	rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
	/* Fill next WQE. */
	fm = &aso_mtr->fm;
	sq->elts[sq->head & mask].mtr = aso_mtr;
	pool = container_of(aso_mtr, struct mlx5_aso_mtr_pool,
			    mtrs[aso_mtr->offset]);
	wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
						  (aso_mtr->offset >> 1));
	wqe->general_cseg.opcode = rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |
						    (ASO_OPC_MOD_POLICER <<
						     WQE_CSEG_OPC_MOD_OFFSET) |
				sq->pi << WQE_CSEG_WQE_INDEX_OFFSET);
	/* There are 2 meters in one ASO cache line. */
	dseg_idx = aso_mtr->offset & 0x1;
	wqe->aso_cseg.data_mask =
		RTE_BE64(MLX5_IFC_FLOW_METER_PARAM_MASK << (32 * !dseg_idx));
	if (fm->is_enable) {
		wqe->aso_dseg.mtrs[dseg_idx].cbs_cir =
			fm->profile->srtcm_prm.cbs_cir;
		wqe->aso_dseg.mtrs[dseg_idx].ebs_eir =
			fm->profile->srtcm_prm.ebs_eir;
	} else {
		wqe->aso_dseg.mtrs[dseg_idx].cbs_cir =
			RTE_BE32(MLX5_IFC_FLOW_METER_DISABLE_CBS_CIR_VAL);
		wqe->aso_dseg.mtrs[dseg_idx].ebs_eir = 0;
	}
	fmp = fm->profile;
	if (fmp->profile.packet_mode)
		wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
			RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
			(MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET) |
			(MLX5_METER_MODE_PKT << ASO_DSEG_MTR_MODE));
	else
		wqe->aso_dseg.mtrs[dseg_idx].v_bo_sc_bbog_mm =
			RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
			(MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
	sq->head++;
	sq->pi += 2; /* Each WQE contains 2 WQEBBs. */
	rte_io_wmb();
	sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
	rte_wmb();
	*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
	rte_wmb();
	rte_spinlock_unlock(&sq->sqsl);
	return 1;
}
static void
mlx5_aso_mtrs_status_update(struct mlx5_aso_sq *sq, uint16_t aso_mtrs_nums)
{
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t i;
	struct mlx5_aso_mtr *aso_mtr = NULL;
	uint8_t exp_state = ASO_METER_WAIT;

	for (i = 0; i < aso_mtrs_nums; ++i) {
		aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
		MLX5_ASSERT(aso_mtr);
		(void)__atomic_compare_exchange_n(&aso_mtr->state,
				&exp_state, ASO_METER_READY,
				false, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	}
}

static void
mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
{
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = cq->cq_ci & mask;
	uint16_t max;
	uint16_t n = 0;
	int ret;

	rte_spinlock_lock(&sq->sqsl);
	max = (uint16_t)(sq->head - sq->tail);
	if (unlikely(!max)) {
		rte_spinlock_unlock(&sq->sqsl);
		return;
	}
	do {
		idx = next_idx;
		next_idx = (cq->cq_ci + 1) & mask;
		rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
		cqe = &cq->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (ret != MLX5_CQE_STATUS_SW_OWN) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_aso_cqe_err_handle(sq);
		} else {
			n++;
		}
		cq->cq_ci++;
	} while (1);
	if (n) {
		mlx5_aso_mtrs_status_update(sq, n);
		sq->tail += n;
		rte_io_wmb();
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	}
	rte_spinlock_unlock(&sq->sqsl);
}
/**
 * Update meter parameters by sending an ASO WQE.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] mtr
 *   Pointer to ASO flow meter to be modified.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
			     struct mlx5_aso_mtr *mtr)
{
	struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
	uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
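	/*
	 * Post the modify WQE; if the SQ is full, poll completions to free
	 * room and retry, up to MLX5_MTR_POLL_WQE_CQE_TIMES times.
	 */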
	do {
		mlx5_aso_mtr_completion_handle(sq);
		if (mlx5_aso_mtr_sq_enqueue_single(sq, mtr))
			return 0;
		/* Waiting for wqe resource. */
		rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
	} while (--poll_wqe_times);
	DRV_LOG(ERR, "Fail to send WQE for ASO meter offset %d",
		mtr->offset);
	return -1;
}
/**
 * Wait for meter to be ready.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 * @param[in] mtr
 *   Pointer to ASO flow meter to wait for.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,
		  struct mlx5_aso_mtr *mtr)
{
	struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
	uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
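	/*
	 * Nothing to do if the meter is already in the READY state;
	 * otherwise poll its completion queue until the state flips or
	 * the retry budget runs out.
	 */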
	if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
					    ASO_METER_READY)
		return 0;
	do {
		mlx5_aso_mtr_completion_handle(sq);
		if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
					    ASO_METER_READY)
			return 0;
		/* Waiting for CQE ready. */
		rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
	} while (--poll_cqe_times);
	DRV_LOG(ERR, "Fail to poll CQE ready for ASO meter offset %d",
		mtr->offset);
	return -1;
}