/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_eal_paging.h>

#include <mlx5_malloc.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5.h"
#include "mlx5_flow.h"
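
/*
 * Flow-hit ASO (Advanced Steering Operation) support for flow aging:
 * a dedicated DevX SQ/CQ pair issues ACCESS_ASO WQEs that make the
 * device dump per-flow hit bits into a registered buffer, and an EAL
 * alarm drives the enqueue/completion loop that ages out flows whose
 * hit bit stayed clear for longer than their timeout.
 */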

/**
 * Destroy Completion Queue used for ASO access.
 *
 * @param[in] cq
 *   ASO CQ to destroy.
 */
static void
mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
{
	if (cq->cq_obj.cq)
		mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
}

/**
 * Create Completion Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] cq
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar_page_id
 *   UAR page ID to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
		   int socket, int uar_page_id)
{
	struct mlx5_devx_cq_attr attr = {
		.uar_page_id = uar_page_id,
	};

	cq->log_desc_n = log_desc_n;
	cq->cq_ci = 0;
	return mlx5_devx_cq_create(ctx, &cq->cq_obj, log_desc_n, &attr, socket);
}

/**
 * Free MR resources.
 *
 * @param[in] mr
 *   MR to free.
 */
static void
mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
{
	claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
	if (!mr->is_indirect && mr->umem)
		claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
	mlx5_free(mr->buf);
	memset(mr, 0, sizeof(*mr));
}

/**
 * Register Memory Region.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] length
 *   Size of MR buffer.
 * @param[in/out] mr
 *   Pointer to MR to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] pdn
 *   Protection Domain number to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
		     int socket, int pdn)
{
	struct mlx5_devx_mkey_attr mkey_attr;
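
	/*
	 * The buffer allocated below is DMA-written by the device with
	 * the flow-hit bitmasks queried through the ASO WQEs.
	 */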
	mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
			      socket);
	if (!mr->buf) {
		DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx.");
		return -1;
	}
	mr->umem = mlx5_os_umem_reg(ctx, mr->buf, length,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!mr->umem) {
		DRV_LOG(ERR, "Failed to register Umem for MR by Devx.");
		goto error;
	}
	mkey_attr.addr = (uintptr_t)mr->buf;
	mkey_attr.size = length;
	mkey_attr.umem_id = mlx5_os_get_umem_id(mr->umem);
	mkey_attr.pd = pdn;
	mkey_attr.pg_access = 1;
	mkey_attr.klm_array = NULL;
	mkey_attr.klm_num = 0;
	mkey_attr.relaxed_ordering_read = 0;
	mkey_attr.relaxed_ordering_write = 0;
	mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
	if (!mr->mkey) {
		DRV_LOG(ERR, "Failed to create direct Mkey.");
		goto error;
	}
	mr->length = length;
	mr->is_indirect = false;
	return 0;
error:
	if (mr->umem)
		claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
	mlx5_free(mr->buf);
	return -1;
}

/**
 * Destroy Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to destroy.
 */
static void
mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
{
	mlx5_devx_sq_destroy(&sq->sq_obj);
	mlx5_aso_cq_destroy(&sq->cq);
	mlx5_aso_devx_dereg_mr(&sq->mr);
	memset(sq, 0, sizeof(*sq));
}

/**
 * Initialize Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
{
	volatile struct mlx5_aso_wqe *restrict wqe;
	int i;
	int size = 1 << sq->log_desc_n;
	uint64_t addr;

	/* All the fields set below stay constant for the SQ's lifetime. */
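	/*
	 * Each WQE is permanently bound to its own 64-byte slice of the
	 * MR buffer (MLX5_ASO_AGE_ACTIONS_PER_POOL bits), so only the
	 * per-request fields need to be written at enqueue time.
	 */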
	for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
							   (sizeof(*wqe) >> 4));
		wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
		addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
				  MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
		wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
		wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
		wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
			(0u |
			 (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
			 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
		wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
	}
}

/**
 * Create Send Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] sq
 *   Pointer to SQ to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar
 *   User Access Region object.
 * @param[in] pdn
 *   Protection Domain number to use.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
		   void *uar, uint32_t pdn, uint16_t log_desc_n)
{
	struct mlx5_devx_create_sq_attr attr = {
		.user_index = 0xFFFF,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = pdn,
			.uar_page = mlx5_os_get_devx_uar_page_id(uar),
		},
	};
	struct mlx5_devx_modify_sq_attr modify_attr = {
		.state = MLX5_SQC_STATE_RDY,
	};
	uint32_t sq_desc_n = 1 << log_desc_n;
	uint16_t log_wqbb_n;
	int ret;

	if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
				 sq_desc_n, &sq->mr, socket, pdn))
		return -1;
	if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
			       mlx5_os_get_devx_uar_page_id(uar)))
		goto error;
	sq->log_desc_n = log_desc_n;
	attr.cqn = sq->cq.cq_obj.cq->id;
	/* For mlx5_aso_wqe, which is twice the size of mlx5_wqe. */
	log_wqbb_n = log_desc_n + 1;
	ret = mlx5_devx_sq_create(ctx, &sq->sq_obj, log_wqbb_n, &attr, socket);
	if (ret) {
		DRV_LOG(ERR, "Can't create SQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	ret = mlx5_devx_cmd_modify_sq(sq->sq_obj.sq, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change SQ state to ready.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->pi = 0;
	sq->head = 0;
	sq->tail = 0;
	sq->sqn = sq->sq_obj.sq->id;
	sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
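	/* Pre-format the constant WQE fields now that SQN and MR are set. */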
	mlx5_aso_init_sq(sq);
	return 0;
error:
	mlx5_aso_destroy_sq(sq);
	return -1;
}

/**
 * API to create and initialize Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
{
	return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
				  sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC);
}

/**
 * API to destroy Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 */
void
mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_aso_destroy_sq(&sh->aso_age_mng->aso_sq);
}

/**
 * Write a burst of WQEs to ASO SQ.
 *
 * @param[in] mng
 *   ASO management data, contains the SQ.
 * @param[in] n
 *   Index of the last valid pool.
 *
 * @return
 *   Number of WQEs in burst.
 */
static uint16_t
mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
{
	volatile struct mlx5_aso_wqe *wqe;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_age_pool *pool;
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t max;
	uint16_t start_head = sq->head;

	max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
	if (unlikely(!max))
		return 0;
	sq->elts[start_head & mask].burst_size = max;
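	/*
	 * Only the first element records the burst size; a single
	 * completion, requested on the last WQE, credits the whole burst.
	 */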
	do {
		wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
		rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
		/* Fill next WQE. */
		rte_spinlock_lock(&mng->resize_sl);
		pool = mng->pools[sq->next];
		rte_spinlock_unlock(&mng->resize_sl);
		sq->elts[sq->head & mask].pool = pool;
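		/* Address the WQE to this pool's flow-hit ASO object. */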
		wqe->general_cseg.misc =
				rte_cpu_to_be_32(((struct mlx5_devx_obj *)
						 (pool->flow_hit_aso_obj))->id);
		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
						   MLX5_COMP_MODE_OFFSET);
		wqe->general_cseg.opcode = rte_cpu_to_be_32
						(MLX5_OPCODE_ACCESS_ASO |
						 (ASO_OPC_MOD_FLOW_HIT <<
						  WQE_CSEG_OPC_MOD_OFFSET) |
						 (sq->pi <<
						  WQE_CSEG_WQE_INDEX_OFFSET));
		sq->pi += 2; /* Each WQE contains 2 WQEBBs. */
		sq->head++;
		sq->next++;
		max--;
	} while (max);
	wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					   MLX5_COMP_MODE_OFFSET);
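	/*
	 * Doorbell sequence: commit the WQE writes, update the doorbell
	 * record with the new producer index, then write the first 8
	 * bytes of the last WQE to the UAR register.
	 */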
	rte_io_wmb();
	sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
	rte_wmb();
	*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64-bit arch. */
	rte_wmb();
	return sq->elts[start_head & mask].burst_size;
}

/**
 * Debug utility function. Dump contents of error CQE and WQE.
 *
 * @param[in] cqe
 *   Error CQE to dump.
 * @param[in] wqe
 *   Error WQE to dump.
 */
static void
mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
{
	int i;

	DRV_LOG(ERR, "Error cqe:");
	for (i = 0; i < 16; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
			cqe[i + 2], cqe[i + 3]);
	DRV_LOG(ERR, "\nError wqe:");
	for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
			wqe[i + 2], wqe[i + 3]);
}

/**
 * Handle case of error CQE.
 *
 * @param[in] sq
 *   ASO SQ to use.
 */
static void
mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
{
	struct mlx5_aso_cq *cq = &sq->cq;
	uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
	volatile struct mlx5_err_cqe *cqe =
			(volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx];

	cq->errors++;
	idx = rte_be_to_cpu_16(cqe->wqe_counter) &
	      ((1u << sq->log_desc_n) - 1);
	mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
			       (volatile uint32_t *)&sq->sq_obj.aso_wqes[idx]);
}

/**
 * Update ASO objects upon completion.
 *
 * @param[in] sh
 *   Shared device context.
 * @param[in] n
 *   Number of completed ASO objects.
 */
static void
mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_age_info *age_info;
	const uint16_t size = 1 << sq->log_desc_n;
	const uint16_t mask = size - 1;
	const uint64_t curr = MLX5_CURR_TIME_SEC;
	uint16_t expected = AGE_CANDIDATE;
	uint16_t i;

	for (i = 0; i < n; ++i) {
		uint16_t idx = (sq->tail + i) & mask;
		struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
		uint64_t diff = curr - pool->time_of_last_age_check;
		uint64_t *addr = sq->mr.buf;
		int j;

		addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
		pool->time_of_last_age_check = curr;
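		/*
		 * Hit bits are laid out big-endian within the 64-byte
		 * block: action j maps to bit (j % 8) of byte (63 - j / 8).
		 */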
		for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
			struct mlx5_aso_age_action *act = &pool->actions[j];
			struct mlx5_age_param *ap = &act->age_params;
			uint8_t byte;
			uint8_t offset;
			uint8_t *u8addr;
			uint8_t hit;

			if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
					    AGE_CANDIDATE)
				continue;
			byte = 63 - (j / 8);
			offset = j % 8;
			u8addr = (uint8_t *)addr;
			hit = (u8addr[byte] >> offset) & 0x1;
			if (hit) {
				__atomic_store_n(&ap->sec_since_last_hit, 0,
						 __ATOMIC_RELAXED);
			} else {
				struct mlx5_priv *priv;

				__atomic_fetch_add(&ap->sec_since_last_hit,
						   diff, __ATOMIC_RELAXED);
				/* If timeout passed add to aged-out list. */
				if (ap->sec_since_last_hit <= ap->timeout)
					continue;
				priv =
				rte_eth_devices[ap->port_id].data->dev_private;
				age_info = GET_PORT_AGE_INFO(priv);
				rte_spinlock_lock(&age_info->aged_sl);
				if (__atomic_compare_exchange_n(&ap->state,
								&expected,
								AGE_TMOUT,
								false,
							       __ATOMIC_RELAXED,
							    __ATOMIC_RELAXED)) {
					LIST_INSERT_HEAD(&age_info->aged_aso,
							 act, next);
					MLX5_AGE_SET(age_info,
						     MLX5_AGE_EVENT_NEW);
				}
				rte_spinlock_unlock(&age_info->aged_sl);
			}
		}
	}
	mlx5_age_event_prepare(sh);
}

/**
 * Handle completions from WQEs sent to ASO SQ.
 *
 * @param[in] sh
 *   Shared device context.
 *
 * @return
 *   Number of CQEs handled.
 */
static uint16_t
mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = cq->cq_ci & mask;
	const uint16_t max = (uint16_t)(sq->head - sq->tail);
	uint16_t i = 0;
	int ret;

	if (unlikely(!max))
		return 0;
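	/*
	 * Poll CQEs until one still owned by HW is reached; each SW-owned
	 * CQE completes an entire previously posted burst.
	 */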
	do {
		idx = next_idx;
		next_idx = (cq->cq_ci + 1) & mask;
		rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
		cqe = &cq->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_aso_cqe_err_handle(sq);
		} else {
			i += sq->elts[(sq->tail + i) & mask].burst_size;
		}
		cq->cq_ci++;
	} while (1);
	if (likely(i)) {
		mlx5_aso_age_action_update(sh, i);
		sq->tail += i;
		rte_io_wmb();
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	}
	return i;
}

/**
 * Periodically read CQEs and send WQEs to ASO SQ.
 *
 * @param[in] arg
 *   Shared device context containing the ASO SQ.
 */
static void
mlx5_flow_aso_alarm(void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;
	struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
	uint32_t us = 100u;
	uint16_t n;

	rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
	n = sh->aso_age_mng->next;
	rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
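	/* Handle pending completions first, then post the next burst. */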
	mlx5_aso_completion_handle(sh);
	if (sq->next == n) {
		/* End of loop: wait 1 second. */
		us = US_PER_S;
		sq->next = 0;
	}
	mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
	if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
		DRV_LOG(ERR, "Cannot reinitialize ASO alarm.");
}

/**
 * API to start ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh)
{
	if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
		DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
		return -rte_errno;
	}
	return 0;
}

/**
 * API to stop ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh)
{
	int retries = 1024;

	if (!sh->aso_age_mng->aso_sq.sq_obj.sq)
		return -EINVAL;
	rte_errno = 0;
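	/*
	 * rte_eal_alarm_cancel() sets rte_errno to EINPROGRESS while the
	 * alarm callback is still executing; retry until it has finished.
	 */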
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}
	return -rte_errno;
}