/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <mlx5_prm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include <mlx5_malloc.h>
#include <mlx5_common_os.h>

#include "mlx5.h"
#include "mlx5_flow.h"

/**
 * Destroy Completion Queue used for ASO access.
 *
 * @param[in] cq
 *   ASO CQ to destroy.
 */
static void
mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		mlx5_free((void *)(uintptr_t)cq->umem_buf);
	memset(cq, 0, sizeof(*cq));
}

/**
 * Create Completion Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] cq
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar_page_id
 *   UAR page ID to use.
 * @param[in] eqn
 *   EQ number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
		   int socket, int uar_page_id, uint32_t eqn)
{
	struct mlx5_devx_cq_attr attr = { 0 };
	size_t pgsize = sysconf(_SC_PAGESIZE);
	uint32_t umem_size;
	uint16_t cq_size = 1 << log_desc_n;

	cq->log_desc_n = log_desc_n;
	umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
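	/*
	 * The CQE ring and the two-dword CQ doorbell record share a single
	 * umem buffer; the doorbell record is placed right after the CQEs.
	 */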
	cq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
				   4096, socket);
	if (!cq->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	cq->umem_obj = mlx5_glue->devx_umem_reg(ctx,
						(void *)(uintptr_t)cq->umem_buf,
						umem_size,
						IBV_ACCESS_LOCAL_WRITE);
	if (!cq->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for aso CQ.");
		goto error;
	}
	attr.q_umem_valid = 1;
	attr.db_umem_valid = 1;
	attr.use_first_only = 0;
	attr.overrun_ignore = 0;
	attr.uar_page_id = uar_page_id;
	attr.q_umem_id = mlx5_os_get_umem_id(cq->umem_obj);
	attr.q_umem_offset = 0;
	attr.db_umem_id = attr.q_umem_id;
	attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
	attr.eqn = eqn;
	attr.log_cq_size = log_desc_n;
	attr.log_page_size = rte_log2_u32(pgsize);
	cq->cq = mlx5_devx_cmd_create_cq(ctx, &attr);
	if (!cq->cq)
		goto error;
	cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
	cq->cq_ci = 0;
	memset((void *)(uintptr_t)cq->umem_buf, 0xFF, attr.db_umem_offset);
	return 0;
error:
	mlx5_aso_cq_destroy(cq);
	return -1;
}

/**
 * Free MR resources.
 *
 * @param[in] mr
 *   MR to free.
 */
static void
mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
{
	claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
	if (!mr->is_indirect && mr->umem)
		claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
	mlx5_free(mr->buf);
	memset(mr, 0, sizeof(*mr));
}

/**
 * Register Memory Region.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] length
 *   Size of MR buffer.
 * @param[in/out] mr
 *   Pointer to MR to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] pdn
 *   Protection Domain number to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
		     int socket, int pdn)
{
	struct mlx5_devx_mkey_attr mkey_attr;

	mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
			      socket);
	if (!mr->buf) {
		DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx.");
		return -1;
	}
	mr->umem = mlx5_glue->devx_umem_reg(ctx, mr->buf, length,
					    IBV_ACCESS_LOCAL_WRITE);
	if (!mr->umem) {
		DRV_LOG(ERR, "Failed to register Umem for MR by Devx.");
		return -1;
	}
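	/*
	 * Create a direct mkey covering the whole umem so the device can DMA
	 * the ASO flow-hit data straight into mr->buf.
	 */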
	mkey_attr.addr = (uintptr_t)mr->buf;
	mkey_attr.size = length;
	mkey_attr.umem_id = mlx5_os_get_umem_id(mr->umem);
	mkey_attr.pd = pdn;
	mkey_attr.pg_access = 1;
	mkey_attr.klm_array = NULL;
	mkey_attr.klm_num = 0;
	mkey_attr.relaxed_ordering_read = 0;
	mkey_attr.relaxed_ordering_write = 0;
	mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
	if (!mr->mkey) {
		DRV_LOG(ERR, "Failed to create direct Mkey.");
		goto error;
	}
	mr->length = length;
	mr->is_indirect = false;
	return 0;
error:
	claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
	return -1;
}

/**
 * Destroy Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to destroy.
 */
static void
mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
{
	if (sq->wqe_umem) {
		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
		sq->wqe_umem = NULL;
	}
	if (sq->umem_buf) {
		mlx5_free((void *)(uintptr_t)sq->umem_buf);
		sq->umem_buf = NULL;
	}
	if (sq->sq) {
		mlx5_devx_cmd_destroy(sq->sq);
		sq->sq = NULL;
	}
	if (sq->cq.cq)
		mlx5_aso_cq_destroy(&sq->cq);
	if (sq->uar_obj)
		mlx5_glue->devx_free_uar(sq->uar_obj);
	mlx5_aso_devx_dereg_mr(&sq->mr);
	memset(sq, 0, sizeof(*sq));
}

/**
 * Initialize Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
{
	volatile struct mlx5_aso_wqe *restrict wqe;
	int i;
	int size = 1 << sq->log_desc_n;
	uint64_t addr;

	/* All the next fields state should stay constant. */
	for (i = 0, wqe = &sq->wqes[0]; i < size; ++i, ++wqe) {
		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
							   (sizeof(*wqe) >> 4));
		wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
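		/*
		 * Each WQE is bound to its own 64-byte slice of the MR:
		 * one flow-hit bit per age action in the queried pool.
		 */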
		addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
				  MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
		wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
		wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
		wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
			(0u |
			 (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
			 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
		wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
	}
}

/**
 * Create Send Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] sq
 *   Pointer to SQ to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] pdn
 *   Protection Domain number to use.
 * @param[in] eqn
 *   EQ number.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
		   uint32_t pdn, uint32_t eqn, uint16_t log_desc_n)
{
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
	size_t pgsize = sysconf(_SC_PAGESIZE);
	struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr;
	uint32_t sq_desc_n = 1 << log_desc_n;
	uint32_t wq_size = sizeof(struct mlx5_aso_wqe) * sq_desc_n;
	int ret;

	if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
				 sq_desc_n, &sq->mr, socket, pdn))
		return -1;
	sq->uar_obj = mlx5_glue->devx_alloc_uar(ctx, 0);
	if (!sq->uar_obj)
		goto error;
	if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
			       mlx5_os_get_devx_uar_page_id(sq->uar_obj), eqn))
		goto error;
	sq->log_desc_n = log_desc_n;
	sq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size +
				   sizeof(*sq->db_rec) * 2, 4096, socket);
	if (!sq->umem_buf) {
		DRV_LOG(ERR, "Can't allocate wqe buffer.");
		return -ENOMEM;
	}
	sq->wqe_umem = mlx5_glue->devx_umem_reg(ctx,
						(void *)(uintptr_t)sq->umem_buf,
						wq_size +
						sizeof(*sq->db_rec) * 2,
						IBV_ACCESS_LOCAL_WRITE);
	if (!sq->wqe_umem) {
		DRV_LOG(ERR, "Failed to register umem for SQ.");
		rte_errno = ENOMEM;
		goto error;
	}
	attr.state = MLX5_SQC_STATE_RST;
	attr.tis_lst_sz = 0;
	attr.tis_num = 0;
	attr.user_index = 0xFFFF;
	attr.cqn = sq->cq.cq->id;
	wq_attr->uar_page = mlx5_os_get_devx_uar_page_id(sq->uar_obj);
	wq_attr->pd = pdn;
	wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
	wq_attr->log_wq_pg_sz = rte_log2_u32(pgsize);
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(sq->wqe_umem);
	wq_attr->wq_umem_offset = 0;
	wq_attr->wq_umem_valid = 1;
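	/*
	 * Each WQEBB is 64 bytes (log_wq_stride = 6) and the doorbell record
	 * is placed right after the WQ ring inside the same umem
	 * (dbr_addr == wq_size).
	 */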
	wq_attr->log_wq_stride = 6;
	wq_attr->log_wq_sz = rte_log2_u32(wq_size) - 6;
	wq_attr->dbr_umem_id = wq_attr->wq_umem_id;
	wq_attr->dbr_addr = wq_size;
	wq_attr->dbr_umem_valid = 1;
	sq->sq = mlx5_devx_cmd_create_sq(ctx, &attr);
	if (!sq->sq) {
		DRV_LOG(ERR, "Can't create sq object.");
		rte_errno = ENOMEM;
		goto error;
	}
	modify_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(sq->sq, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change sq state to ready.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->pi = 0;
	sq->ci = 0;
	sq->sqn = sq->sq->id;
	sq->db_rec = RTE_PTR_ADD(sq->umem_buf, (uintptr_t)(wq_attr->dbr_addr));
	sq->uar_addr = (volatile uint64_t *)((uint8_t *)sq->uar_obj->base_addr +
					     0x800);
	mlx5_aso_init_sq(sq);
	return 0;
error:
	mlx5_aso_destroy_sq(sq);
	return -1;
}

/**
 * API to create and initialize Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
{
	return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0, sh->pdn,
				  sh->eqn, MLX5_ASO_QUEUE_LOG_DESC);
}

/**
 * API to destroy Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 */
void
mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_aso_destroy_sq(&sh->aso_age_mng->aso_sq);
}

/**
 * Write a burst of WQEs to ASO SQ.
 *
 * @param[in] mng
 *   ASO management data, contains the SQ.
 * @param[in] n
 *   Index of the last valid pool.
 *
 * @return
 *   Number of WQEs in burst.
 */
static uint16_t
mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
{
	volatile struct mlx5_aso_wqe *wqe;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_age_pool *pool;
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t max;
	uint16_t start_pi = sq->pi;

	max = RTE_MIN(size - (uint16_t)(sq->pi - sq->ci), n - sq->next);
	if (unlikely(!max))
		return 0;
	sq->elts[start_pi & mask].burst_size = max;
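	/*
	 * Fill one ACCESS_ASO WQE per pending pool; every WQE queries the
	 * flow-hit bits of a whole pool of age actions.
	 */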
	do {
		wqe = &sq->wqes[sq->pi & mask];
		rte_prefetch0(&sq->wqes[(sq->pi + 1) & mask]);
		/* Fill next WQE. */
		rte_spinlock_lock(&mng->resize_sl);
		pool = mng->pools[sq->next];
		rte_spinlock_unlock(&mng->resize_sl);
		sq->elts[sq->pi & mask].pool = pool;
		wqe->general_cseg.misc =
			rte_cpu_to_be_32(((struct mlx5_devx_obj *)
					 (pool->flow_hit_aso_obj))->id);
		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
						   MLX5_COMP_MODE_OFFSET);
		wqe->general_cseg.opcode = rte_cpu_to_be_32
						(MLX5_OPCODE_ACCESS_ASO |
						 ASO_OP_MOD_FLOW_HIT << 24 |
						 sq->pi << 9);
		sq->pi++;
		sq->next++;
		max--;
	} while (max);
	wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					   MLX5_COMP_MODE_OFFSET);
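	/*
	 * Only the last WQE of the burst requests a completion
	 * unconditionally; the others complete only on error. Make sure the
	 * WQEs are written before the doorbell record, then ring the doorbell
	 * by copying the first 8 bytes of the last WQE to the UAR register.
	 */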
	rte_io_wmb();
	sq->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi << 1);
	rte_wmb();
	*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH.*/
	rte_wmb();
	return sq->elts[start_pi & mask].burst_size;
}

/**
 * Debug utility function. Dump contents of error CQE and WQE.
 *
 * @param[in] cqe
 *   Error CQE to dump.
 * @param[in] wqe
 *   Error WQE to dump.
 */
static void
mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
{
	int i;

	DRV_LOG(ERR, "Error cqe:");
	for (i = 0; i < 16; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
			cqe[i + 2], cqe[i + 3]);
	DRV_LOG(ERR, "\nError wqe:");
	for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
			wqe[i + 2], wqe[i + 3]);
}

/**
 * Handle case of error CQE.
 *
 * @param[in] sq
 *   ASO SQ to use.
 */
static void
mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
{
	struct mlx5_aso_cq *cq = &sq->cq;
	uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
	volatile struct mlx5_err_cqe *cqe =
			(volatile struct mlx5_err_cqe *)&cq->cqes[idx];

	cq->errors++;
	idx = rte_be_to_cpu_16(cqe->wqe_counter) & (1u << sq->log_desc_n);
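	/* Dump both the failing CQE and the WQE it points to via wqe_counter. */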
	mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
			       (volatile uint32_t *)&sq->wqes[idx]);
}

/**
 * Update ASO objects upon completion.
 *
 * @param[in] sh
 *   Shared device context.
 * @param[in] n
 *   Number of completed ASO objects.
 */
static void
mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_age_info *age_info;
	const uint16_t size = 1 << sq->log_desc_n;
	const uint16_t mask = size - 1;
	const uint64_t curr = MLX5_CURR_TIME_SEC;
	uint16_t expected = AGE_CANDIDATE;
	uint16_t i;

	for (i = 0; i < n; ++i) {
		uint16_t idx = (sq->ci + i) & mask;
		struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
		uint64_t diff = curr - pool->time_of_last_age_check;
		uint64_t *addr = sq->mr.buf;
		int j;

		addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
		pool->time_of_last_age_check = curr;
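		/*
		 * The completed WQE filled a 64-byte block in the MR: one
		 * flow-hit bit per age action of this pool. Reset the idle
		 * time of hit actions, accumulate it for the others.
		 */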
		for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
			struct mlx5_aso_age_action *act = &pool->actions[j];
			struct mlx5_age_param *ap = &act->age_params;
			uint8_t byte;
			uint8_t offset;
			uint8_t *u8addr;
			uint8_t hit;

			if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
					    AGE_CANDIDATE)
				continue;
			byte = 63 - (j / 8);
			offset = j % 8;
			u8addr = (uint8_t *)addr;
			hit = (u8addr[byte] >> offset) & 0x1;
			if (hit) {
				__atomic_store_n(&ap->sec_since_last_hit, 0,
						 __ATOMIC_RELAXED);
			} else {
				struct mlx5_priv *priv;

				__atomic_fetch_add(&ap->sec_since_last_hit,
						   diff, __ATOMIC_RELAXED);
				/* If timeout passed add to aged-out list. */
				if (ap->sec_since_last_hit <= ap->timeout)
					continue;
				priv =
				rte_eth_devices[ap->port_id].data->dev_private;
				age_info = GET_PORT_AGE_INFO(priv);
				rte_spinlock_lock(&age_info->aged_sl);
				if (__atomic_compare_exchange_n(&ap->state,
						&expected, AGE_TMOUT,
						false, __ATOMIC_RELAXED,
						__ATOMIC_RELAXED)) {
					LIST_INSERT_HEAD(&age_info->aged_aso,
							 act, next);
					MLX5_AGE_SET(age_info,
						     MLX5_AGE_EVENT_NEW);
				}
				rte_spinlock_unlock(&age_info->aged_sl);
			}
		}
	}
	mlx5_age_event_prepare(sh);
}

/**
 * Handle completions from WQEs sent to ASO SQ.
 *
 * @param[in] sh
 *   Shared device context.
 *
 * @return
 *   Number of CQEs handled.
 */
static uint16_t
mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = cq->cq_ci & mask;
	const uint16_t max = (uint16_t)(sq->pi - sq->ci);
	uint16_t i = 0;
	int ret;

	if (unlikely(!max))
		return 0;
	do {
		idx = next_idx;
		next_idx = (cq->cq_ci + 1) & mask;
		rte_prefetch0(&cq->cqes[next_idx]);
		cqe = &cq->cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_aso_cqe_err_handle(sq);
		} else {
			i += sq->elts[(sq->ci + i) & mask].burst_size;
		}
		cq->cq_ci++;
	} while (i < max);
	if (likely(i)) {
		mlx5_aso_age_action_update(sh, i);
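		/* Release the completed WQEs and update the CQ doorbell. */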
		sq->ci += i;
		rte_io_wmb();
		cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	}
	return i;
}

/**
 * Periodically read CQEs and send WQEs to ASO SQ.
 *
 * @param[in] arg
 *   Shared device context containing the ASO SQ.
 */
static void
mlx5_flow_aso_alarm(void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;
	struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
	uint32_t us = 100u;
	uint16_t n;

	rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
	n = sh->aso_age_mng->next;
	rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
	mlx5_aso_completion_handle(sh);
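	/*
	 * Re-arm quickly while pools are still pending; once the whole pool
	 * list has been queried, restart from the first pool after one second.
	 */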
	if (sq->next == n) {
		/* End of loop: wait 1 second. */
		us = US_PER_S;
		sq->next = 0;
	}
	mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
	if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
		DRV_LOG(ERR, "Cannot reinitialize aso alarm.");
}

/**
 * API to start ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh)
{
	if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
		DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
		return -rte_errno;
	}
	return 0;
}

/**
 * API to stop ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh)
{
	int retries = 1024;

	if (!sh->aso_age_mng->aso_sq.sq)
		return -EINVAL;
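	/*
	 * The alarm callback may be executing right now (EINPROGRESS);
	 * keep retrying until the cancellation takes effect.
	 */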
	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}
	return -rte_errno;
}