/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <mlx5_prm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_eal_paging.h>

#include <mlx5_malloc.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5.h"
#include "mlx5_flow.h"

/**
 * Destroy Completion Queue used for ASO access.
 *
 * @param[in] cq
 *   ASO CQ to destroy.
 */
static void
mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
{
	if (cq->cq_obj.cq)
		mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
}

/**
 * Create Completion Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] cq
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar_page_id
 *   UAR page ID to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
		   int socket, int uar_page_id)
{
	struct mlx5_devx_cq_attr attr = {
		.uar_page_id = uar_page_id,
	};

	cq->log_desc_n = log_desc_n;
	cq->cq_ci = 0;
	return mlx5_devx_cq_create(ctx, &cq->cq_obj, log_desc_n, &attr, socket);
}
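
/**
 * Free MR resources.
 *
 * @param[in] mr
 *   MR to free.
 */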
static void
mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
{
	claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
	if (!mr->is_indirect && mr->umem)
		claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
	mlx5_free(mr->buf);
	memset(mr, 0, sizeof(*mr));
}

/**
 * Register Memory Region.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] length
 *   Size of MR buffer.
 * @param[in/out] mr
 *   Pointer to MR to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] pdn
 *   Protection Domain number to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
		     int socket, int pdn)
{
	struct mlx5_devx_mkey_attr mkey_attr;

	mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
			      socket);
	if (!mr->buf) {
		DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx.");
		return -1;
	}
	mr->umem = mlx5_os_umem_reg(ctx, mr->buf, length,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!mr->umem) {
		DRV_LOG(ERR, "Failed to register Umem for MR by Devx.");
		goto error;
	}
	mkey_attr.addr = (uintptr_t)mr->buf;
	mkey_attr.size = length;
	mkey_attr.umem_id = mlx5_os_get_umem_id(mr->umem);
	mkey_attr.pd = pdn;
	mkey_attr.pg_access = 1;
	mkey_attr.klm_array = NULL;
	mkey_attr.klm_num = 0;
	mkey_attr.relaxed_ordering_read = 0;
	mkey_attr.relaxed_ordering_write = 0;
	mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
	if (!mr->mkey) {
		DRV_LOG(ERR, "Failed to create direct Mkey.");
		goto error;
	}
	mr->length = length;
	mr->is_indirect = false;
	return 0;
error:
	if (mr->umem)
		claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
	mlx5_free(mr->buf);
	return -1;
}

/**
 * Destroy Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to destroy.
 */
static void
mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
{
	if (sq->wqe_umem) {
		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
		sq->wqe_umem = NULL;
	}
	if (sq->umem_buf) {
		mlx5_free((void *)(uintptr_t)sq->umem_buf);
		sq->umem_buf = NULL;
	}
	if (sq->sq) {
		mlx5_devx_cmd_destroy(sq->sq);
		sq->sq = NULL;
	}
	mlx5_aso_cq_destroy(&sq->cq);
	mlx5_aso_devx_dereg_mr(&sq->mr);
	memset(sq, 0, sizeof(*sq));
}

/**
 * Initialize Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
{
	volatile struct mlx5_aso_wqe *restrict wqe;
	int i;
	int size = 1 << sq->log_desc_n;
	uint64_t addr;

	/* All the next fields state should stay constant. */
	for (i = 0, wqe = &sq->wqes[0]; i < size; ++i, ++wqe) {
		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
							   (sizeof(*wqe) >> 4));
		wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
		addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
				  MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
		wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
		wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
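		/*
		 * Unconditional ASO operation: both conditions are "always
		 * true" and OR-ed, and the byte-wise 64-byte data mask with
		 * all bits set covers the whole flow-hit bitmap of the pool.
		 */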
		wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
			(0u |
			 (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
			 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
		wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
	}
}

/**
 * Create Send Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] sq
 *   Pointer to SQ to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar
 *   User Access Region object.
 * @param[in] pdn
 *   Protection Domain number to use.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
		   void *uar, uint32_t pdn, uint16_t log_desc_n)
{
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
	size_t pgsize = rte_mem_page_size();
	struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr;
	uint32_t sq_desc_n = 1 << log_desc_n;
	uint32_t wq_size = sizeof(struct mlx5_aso_wqe) * sq_desc_n;
	int ret;

	if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
				 sq_desc_n, &sq->mr, socket, pdn))
		return -1;
	if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
			       mlx5_os_get_devx_uar_page_id(uar)))
		goto error;
	sq->log_desc_n = log_desc_n;
	sq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size +
				   sizeof(*sq->db_rec) * 2, 4096, socket);
	if (!sq->umem_buf) {
		DRV_LOG(ERR, "Can't allocate wqe buffer.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->wqe_umem = mlx5_os_umem_reg(ctx,
					(void *)(uintptr_t)sq->umem_buf,
					wq_size +
					sizeof(*sq->db_rec) * 2,
					IBV_ACCESS_LOCAL_WRITE);
	if (!sq->wqe_umem) {
		DRV_LOG(ERR, "Failed to register umem for SQ.");
		rte_errno = ENOMEM;
		goto error;
	}
	attr.state = MLX5_SQC_STATE_RST;
	attr.tis_lst_sz = 0;
	attr.tis_num = 0;
	attr.user_index = 0xFFFF;
	attr.cqn = sq->cq.cq_obj.cq->id;
	wq_attr->uar_page = mlx5_os_get_devx_uar_page_id(uar);
	wq_attr->pd = pdn;
	wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
	wq_attr->log_wq_pg_sz = rte_log2_u32(pgsize);
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(sq->wqe_umem);
	wq_attr->wq_umem_offset = 0;
	wq_attr->wq_umem_valid = 1;
	wq_attr->log_wq_stride = 6;
	wq_attr->log_wq_sz = rte_log2_u32(wq_size) - 6;
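	/* The doorbell record lives right after the WQE ring, in the same umem. */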
	wq_attr->dbr_umem_id = wq_attr->wq_umem_id;
	wq_attr->dbr_addr = wq_size;
	wq_attr->dbr_umem_valid = 1;
	sq->sq = mlx5_devx_cmd_create_sq(ctx, &attr);
	if (!sq->sq) {
		DRV_LOG(ERR, "Can't create sq object.");
		rte_errno = ENOMEM;
		goto error;
	}
	modify_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(sq->sq, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change sq state to ready.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->pi = 0;
	sq->head = 0;
	sq->tail = 0;
	sq->sqn = sq->sq->id;
	sq->db_rec = RTE_PTR_ADD(sq->umem_buf, (uintptr_t)(wq_attr->dbr_addr));
	sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
	mlx5_aso_init_sq(sq);
	return 0;
error:
	mlx5_aso_destroy_sq(sq);
	return -1;
}

/**
 * API to create and initialize Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
{
	return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
				  sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC);
}
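
/*
 * Expected call flow (a sketch, not enforced here): mlx5_aso_queue_init()
 * creates the SQ once per shared context, mlx5_aso_queue_start() arms the
 * periodic alarm that drives it, and mlx5_aso_queue_stop() cancels the alarm
 * before mlx5_aso_queue_uninit() releases the resources.
 */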

/**
 * API to destroy Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 */
void
mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_aso_destroy_sq(&sh->aso_age_mng->aso_sq);
}

/**
 * Write a burst of WQEs to ASO SQ.
 *
 * @param[in] mng
 *   ASO management data, contains the SQ.
 * @param[in] n
 *   Index of the last valid pool.
 *
 * @return
 *   Number of WQEs in burst.
 */
static uint16_t
mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
{
	volatile struct mlx5_aso_wqe *wqe;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_age_pool *pool;
	uint16_t size = 1 << sq->log_desc_n;
	uint16_t mask = size - 1;
	uint16_t max;
	uint16_t start_head = sq->head;

	max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
	if (unlikely(!max))
		return 0;
	sq->elts[start_head & mask].burst_size = max;
	do {
		wqe = &sq->wqes[sq->head & mask];
		rte_prefetch0(&sq->wqes[(sq->head + 1) & mask]);
		/* Fill next WQE. */
		rte_spinlock_lock(&mng->resize_sl);
		pool = mng->pools[sq->next];
		rte_spinlock_unlock(&mng->resize_sl);
		sq->elts[sq->head & mask].pool = pool;
		wqe->general_cseg.misc =
			rte_cpu_to_be_32(((struct mlx5_devx_obj *)
					  (pool->flow_hit_aso_obj))->id);
		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
						   MLX5_COMP_MODE_OFFSET);
		wqe->general_cseg.opcode = rte_cpu_to_be_32
						(MLX5_OPCODE_ACCESS_ASO |
						 (ASO_OPC_MOD_FLOW_HIT <<
						  WQE_CSEG_OPC_MOD_OFFSET) |
						 (sq->pi <<
						  WQE_CSEG_WQE_INDEX_OFFSET));
		sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
		sq->head++;
		sq->next++;
		max--;
	} while (max);
	wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					   MLX5_COMP_MODE_OFFSET);
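	/*
	 * Ring the doorbell: make the WQE writes visible, publish the new
	 * producer index in the doorbell record, then write the beginning of
	 * the last WQE to the UAR register to notify the hardware.
	 */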
	rte_io_wmb();
	sq->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
	rte_wmb();
	*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
	rte_wmb();
	return sq->elts[start_head & mask].burst_size;
}

/**
 * Debug utility function. Dump contents of error CQE and WQE.
 *
 * @param[in] cqe
 *   Error CQE to dump.
 * @param[in] wqe
 *   Error WQE to dump.
 */
static void
mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
{
	int i;

	DRV_LOG(ERR, "Error cqe:");
	for (i = 0; i < 16; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
			cqe[i + 2], cqe[i + 3]);
	DRV_LOG(ERR, "\nError wqe:");
	for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
			wqe[i + 2], wqe[i + 3]);
}

/**
 * Handle case of error CQE.
 *
 * @param[in] sq
 *   ASO SQ to use.
 */
static void
mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
{
	struct mlx5_aso_cq *cq = &sq->cq;
	uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
	volatile struct mlx5_err_cqe *cqe =
			(volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx];

	cq->errors++;
	/* Mask the reported WQE counter into a valid SQ ring index. */
	idx = rte_be_to_cpu_16(cqe->wqe_counter) & ((1u << sq->log_desc_n) - 1);
	mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
			       (volatile uint32_t *)&sq->wqes[idx]);
}

/**
 * Update ASO objects upon completion.
 *
 * @param[in] sh
 *   Shared device context.
 * @param[in] n
 *   Number of completed ASO objects.
 */
static void
mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_age_info *age_info;
	const uint16_t size = 1 << sq->log_desc_n;
	const uint16_t mask = size - 1;
	const uint64_t curr = MLX5_CURR_TIME_SEC;
	uint16_t expected = AGE_CANDIDATE;
	uint16_t i;

	for (i = 0; i < n; ++i) {
		uint16_t idx = (sq->tail + i) & mask;
		struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
		uint64_t diff = curr - pool->time_of_last_age_check;
		uint64_t *addr = sq->mr.buf;
		int j;

		addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
		pool->time_of_last_age_check = curr;
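		/*
		 * The MR holds one 64-byte flow-hit bitmap per posted WQE,
		 * one bit per age action in the pool; the device sets a bit
		 * when the corresponding flow was hit since the last query.
		 */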
		for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
			struct mlx5_aso_age_action *act = &pool->actions[j];
			struct mlx5_age_param *ap = &act->age_params;
			uint8_t byte;
			uint8_t offset;
			uint8_t *u8addr;
			uint8_t hit;

			if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
					    AGE_CANDIDATE)
				continue;
			byte = 63 - (j / 8);
			offset = j % 8;
			u8addr = (uint8_t *)addr;
			hit = (u8addr[byte] >> offset) & 0x1;
			if (hit) {
				__atomic_store_n(&ap->sec_since_last_hit, 0,
						 __ATOMIC_RELAXED);
			} else {
				struct mlx5_priv *priv;

				__atomic_fetch_add(&ap->sec_since_last_hit,
						   diff, __ATOMIC_RELAXED);
				/* If timeout passed add to aged-out list. */
				if (ap->sec_since_last_hit <= ap->timeout)
					continue;
				priv =
				rte_eth_devices[ap->port_id].data->dev_private;
				age_info = GET_PORT_AGE_INFO(priv);
				rte_spinlock_lock(&age_info->aged_sl);
				if (__atomic_compare_exchange_n(&ap->state,
								&expected,
								AGE_TMOUT,
								false,
								__ATOMIC_RELAXED,
								__ATOMIC_RELAXED)) {
					LIST_INSERT_HEAD(&age_info->aged_aso,
							 act, next);
					MLX5_AGE_SET(age_info,
						     MLX5_AGE_EVENT_NEW);
				}
				rte_spinlock_unlock(&age_info->aged_sl);
			}
		}
	}
	mlx5_age_event_prepare(sh);
}

/**
 * Handle completions from WQEs sent to ASO SQ.
 *
 * @param[in] sh
 *   Shared device context.
 *
 * @return
 *   Number of CQEs handled.
 */
static uint16_t
mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
	struct mlx5_aso_sq *sq = &mng->aso_sq;
	struct mlx5_aso_cq *cq = &sq->cq;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = 1 << cq->log_desc_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = cq->cq_ci & mask;
	const uint16_t max = (uint16_t)(sq->head - sq->tail);
	uint16_t i = 0;
	int ret;

	if (unlikely(!max))
		return 0;
	do {
		idx = next_idx;
		next_idx = (cq->cq_ci + 1) & mask;
		rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
		cqe = &cq->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, cq->cq_ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_aso_cqe_err_handle(sq);
		} else {
			i += sq->elts[(sq->tail + i) & mask].burst_size;
		}
		cq->cq_ci++;
	} while (1);
	if (likely(i)) {
		mlx5_aso_age_action_update(sh, i);
		sq->tail += i;
		rte_io_wmb();
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
	}
	return i;
}

/**
 * Periodically read CQEs and send WQEs to ASO SQ.
 *
 * @param[in] arg
 *   Shared device context containing the ASO SQ.
 */
static void
mlx5_flow_aso_alarm(void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;
	struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
	uint32_t us = 100u;
	uint16_t n;

	rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
	n = sh->aso_age_mng->next;
	rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
	mlx5_aso_completion_handle(sh);
	if (sq->next == n) {
		/* End of loop: wait 1 second. */
		us = US_PER_S;
		sq->next = 0;
	}
	mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
	if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
		DRV_LOG(ERR, "Cannot reinitialize aso alarm.");
}

/**
 * API to start ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh)
{
	if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
		DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
		return -rte_errno;
	}
	return 0;
}

/**
 * API to stop ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh)
{
	int retries = 1024;

	if (!sh->aso_age_mng->aso_sq.sq)
		return -EINVAL;
	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}
	return -rte_errno;
}