[dpdk.git] drivers/net/mlx5/mlx5_flow_age.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <mlx5_prm.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_eal_paging.h>

#include <mlx5_malloc.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5.h"
#include "mlx5_flow.h"


/**
 * Destroy Completion Queue used for ASO access.
 *
 * @param[in] cq
 *   ASO CQ to destroy.
 */
static void
mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
{
        if (cq->cq_obj.cq)
                mlx5_devx_cq_destroy(&cq->cq_obj);
        memset(cq, 0, sizeof(*cq));
}

/**
 * Create Completion Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] cq
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar_page_id
 *   UAR page ID to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
                   int socket, int uar_page_id)
{
        struct mlx5_devx_cq_attr attr = {
                .uar_page_id = uar_page_id,
        };

        cq->log_desc_n = log_desc_n;
        cq->cq_ci = 0;
        return mlx5_devx_cq_create(ctx, &cq->cq_obj, log_desc_n, &attr, socket);
}

/**
 * Free MR resources.
 *
 * @param[in] mr
 *   MR to free.
 */
static void
mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
{
        claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
        if (!mr->is_indirect && mr->umem)
                claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
        mlx5_free(mr->buf);
        memset(mr, 0, sizeof(*mr));
}

/**
 * Register Memory Region.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] length
 *   Size of MR buffer.
 * @param[in/out] mr
 *   Pointer to MR to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] pdn
 *   Protection Domain number to use.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
                     int socket, int pdn)
{
        struct mlx5_devx_mkey_attr mkey_attr;

        mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
                              socket);
        if (!mr->buf) {
                DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx.");
                return -1;
        }
        mr->umem = mlx5_os_umem_reg(ctx, mr->buf, length,
                                    IBV_ACCESS_LOCAL_WRITE);
        if (!mr->umem) {
                DRV_LOG(ERR, "Failed to register Umem for MR by Devx.");
                goto error;
        }
        mkey_attr.addr = (uintptr_t)mr->buf;
        mkey_attr.size = length;
        mkey_attr.umem_id = mlx5_os_get_umem_id(mr->umem);
        mkey_attr.pd = pdn;
        mkey_attr.pg_access = 1;
        mkey_attr.klm_array = NULL;
        mkey_attr.klm_num = 0;
        mkey_attr.relaxed_ordering_read = 0;
        mkey_attr.relaxed_ordering_write = 0;
        mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
        if (!mr->mkey) {
                DRV_LOG(ERR, "Failed to create direct Mkey.");
                goto error;
        }
        mr->length = length;
        mr->is_indirect = false;
        return 0;
error:
        if (mr->umem)
                claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
        mlx5_free(mr->buf);
        return -1;
}

/**
 * Destroy Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to destroy.
 */
static void
mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
{
        if (sq->wqe_umem) {
                mlx5_glue->devx_umem_dereg(sq->wqe_umem);
                sq->wqe_umem = NULL;
        }
        if (sq->umem_buf) {
                mlx5_free((void *)(uintptr_t)sq->umem_buf);
                sq->umem_buf = NULL;
        }
        if (sq->sq) {
                mlx5_devx_cmd_destroy(sq->sq);
                sq->sq = NULL;
        }
        mlx5_aso_cq_destroy(&sq->cq);
        mlx5_aso_devx_dereg_mr(&sq->mr);
        memset(sq, 0, sizeof(*sq));
}

/**
 * Initialize Send Queue used for ASO access.
 *
 * @param[in] sq
 *   ASO SQ to initialize.
 */
static void
mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
{
        volatile struct mlx5_aso_wqe *restrict wqe;
        int i;
        int size = 1 << sq->log_desc_n;
        uint64_t addr;

        /* All the next fields state should stay constant. */
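        /*
         * Each WQE reads its own per-pool slice of the registered buffer,
         * one hit bit per age action, using a full bytewise 64-byte data
         * mask.
         */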
        for (i = 0, wqe = &sq->wqes[0]; i < size; ++i, ++wqe) {
                wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
                                                           (sizeof(*wqe) >> 4));
                wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
                addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
                                  MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
                wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
                wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
                wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
                        (0u |
                         (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
                         (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
                         (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
                         (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
                wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
        }
}

/**
 * Create Send Queue used for ASO access.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] sq
 *   Pointer to SQ to create.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[in] uar
 *   User Access Region object.
 * @param[in] pdn
 *   Protection Domain number to use.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
                   void *uar, uint32_t pdn, uint16_t log_desc_n)
{
        struct mlx5_devx_create_sq_attr attr = { 0 };
        struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
        size_t pgsize = rte_mem_page_size();
        struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr;
        uint32_t sq_desc_n = 1 << log_desc_n;
        uint32_t wq_size = sizeof(struct mlx5_aso_wqe) * sq_desc_n;
        int ret;

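        /*
         * The MR holds one flow-hit bitmap (MLX5_ASO_AGE_ACTIONS_PER_POOL
         * bits, one bit per age action) for every SQ descriptor.
         */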
        if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
                                 sq_desc_n, &sq->mr, socket, pdn))
                return -1;
        if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
                               mlx5_os_get_devx_uar_page_id(uar)))
                goto error;
        sq->log_desc_n = log_desc_n;
        sq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size +
                                   sizeof(*sq->db_rec) * 2, 4096, socket);
        if (!sq->umem_buf) {
                DRV_LOG(ERR, "Can't allocate wqe buffer.");
                rte_errno = ENOMEM;
                goto error;
        }
        sq->wqe_umem = mlx5_os_umem_reg(ctx,
                                        (void *)(uintptr_t)sq->umem_buf,
                                        wq_size +
                                        sizeof(*sq->db_rec) * 2,
                                        IBV_ACCESS_LOCAL_WRITE);
        if (!sq->wqe_umem) {
                DRV_LOG(ERR, "Failed to register umem for SQ.");
                rte_errno = ENOMEM;
                goto error;
        }
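        /*
         * The WQ is sized in 64-byte basic blocks (log stride 6) and the
         * doorbell record is placed right after the WQE ring in the same
         * umem (offset wq_size).
         */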
        attr.state = MLX5_SQC_STATE_RST;
        attr.tis_lst_sz = 0;
        attr.tis_num = 0;
        attr.user_index = 0xFFFF;
        attr.cqn = sq->cq.cq_obj.cq->id;
        wq_attr->uar_page = mlx5_os_get_devx_uar_page_id(uar);
        wq_attr->pd = pdn;
        wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
        wq_attr->log_wq_pg_sz = rte_log2_u32(pgsize);
        wq_attr->wq_umem_id = mlx5_os_get_umem_id(sq->wqe_umem);
        wq_attr->wq_umem_offset = 0;
        wq_attr->wq_umem_valid = 1;
        wq_attr->log_wq_stride = 6;
        wq_attr->log_wq_sz = rte_log2_u32(wq_size) - 6;
        wq_attr->dbr_umem_id = wq_attr->wq_umem_id;
        wq_attr->dbr_addr = wq_size;
        wq_attr->dbr_umem_valid = 1;
        sq->sq = mlx5_devx_cmd_create_sq(ctx, &attr);
        if (!sq->sq) {
                DRV_LOG(ERR, "Can't create sq object.");
                rte_errno = ENOMEM;
                goto error;
        }
        modify_attr.state = MLX5_SQC_STATE_RDY;
        ret = mlx5_devx_cmd_modify_sq(sq->sq, &modify_attr);
        if (ret) {
                DRV_LOG(ERR, "Can't change sq state to ready.");
                rte_errno = ENOMEM;
                goto error;
        }
        sq->pi = 0;
        sq->head = 0;
        sq->tail = 0;
        sq->sqn = sq->sq->id;
        sq->db_rec = RTE_PTR_ADD(sq->umem_buf, (uintptr_t)(wq_attr->dbr_addr));
        sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
        mlx5_aso_init_sq(sq);
        return 0;
error:
        mlx5_aso_destroy_sq(sq);
        return -1;
}

/**
 * API to create and initialize Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
{
        return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
                                  sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC);
}

/**
 * API to destroy Send Queue used for ASO access.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 */
void
mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh)
{
        mlx5_aso_destroy_sq(&sh->aso_age_mng->aso_sq);
}

/**
 * Write a burst of WQEs to ASO SQ.
 *
 * @param[in] mng
 *   ASO management data, contains the SQ.
 * @param[in] n
 *   Index of the last valid pool.
 *
 * @return
 *   Number of WQEs in burst.
 */
static uint16_t
mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
{
        volatile struct mlx5_aso_wqe *wqe;
        struct mlx5_aso_sq *sq = &mng->aso_sq;
        struct mlx5_aso_age_pool *pool;
        uint16_t size = 1 << sq->log_desc_n;
        uint16_t mask = size - 1;
        uint16_t max;
        uint16_t start_head = sq->head;

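        /*
         * Post one WQE per pool, limited by the free SQ slots and by the
         * number of pools left to query in this pass.
         */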
        max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
        if (unlikely(!max))
                return 0;
        sq->elts[start_head & mask].burst_size = max;
        do {
                wqe = &sq->wqes[sq->head & mask];
                rte_prefetch0(&sq->wqes[(sq->head + 1) & mask]);
                /* Fill next WQE. */
                rte_spinlock_lock(&mng->resize_sl);
                pool = mng->pools[sq->next];
                rte_spinlock_unlock(&mng->resize_sl);
                sq->elts[sq->head & mask].pool = pool;
                wqe->general_cseg.misc =
                        rte_cpu_to_be_32(((struct mlx5_devx_obj *)
                                          (pool->flow_hit_aso_obj))->id);
                wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
                                                   MLX5_COMP_MODE_OFFSET);
                wqe->general_cseg.opcode = rte_cpu_to_be_32
                                                (MLX5_OPCODE_ACCESS_ASO |
                                                 (ASO_OPC_MOD_FLOW_HIT <<
                                                  WQE_CSEG_OPC_MOD_OFFSET) |
                                                 (sq->pi <<
                                                  WQE_CSEG_WQE_INDEX_OFFSET));
                sq->pi += 2; /* Each WQE contains 2 WQEBBs. */
                sq->head++;
                sq->next++;
                max--;
        } while (max);
        wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
                                           MLX5_COMP_MODE_OFFSET);
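        /*
         * Update the doorbell record with the new producer index, then ring
         * the doorbell by writing the first 8 bytes of the last WQE to the
         * UAR register.
         */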
        rte_io_wmb();
        sq->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
        rte_wmb();
        *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
        rte_wmb();
        return sq->elts[start_head & mask].burst_size;
}

/**
 * Debug utility function. Dump contents of error CQE and WQE.
 *
 * @param[in] cqe
 *   Error CQE to dump.
 * @param[in] wqe
 *   Error WQE to dump.
 */
static void
mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
{
        int i;

        DRV_LOG(ERR, "Error cqe:");
        for (i = 0; i < 16; i += 4)
                DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
                        cqe[i + 2], cqe[i + 3]);
        DRV_LOG(ERR, "\nError wqe:");
        for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
                DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
                        wqe[i + 2], wqe[i + 3]);
}

/**
 * Handle case of error CQE.
 *
 * @param[in] sq
 *   ASO SQ to use.
 */
static void
mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
{
        struct mlx5_aso_cq *cq = &sq->cq;
        uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
        volatile struct mlx5_err_cqe *cqe =
                        (volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx];

        cq->errors++;
        idx = rte_be_to_cpu_16(cqe->wqe_counter) & (1u << sq->log_desc_n);
        mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
                               (volatile uint32_t *)&sq->wqes[idx]);
}

/**
 * Update ASO objects upon completion.
 *
 * @param[in] sh
 *   Shared device context.
 * @param[in] n
 *   Number of completed ASO objects.
 */
static void
mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
{
        struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
        struct mlx5_aso_sq *sq = &mng->aso_sq;
        struct mlx5_age_info *age_info;
        const uint16_t size = 1 << sq->log_desc_n;
        const uint16_t mask = size - 1;
        const uint64_t curr = MLX5_CURR_TIME_SEC;
        uint16_t expected = AGE_CANDIDATE;
        uint16_t i;

        for (i = 0; i < n; ++i) {
                uint16_t idx = (sq->tail + i) & mask;
                struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
                uint64_t diff = curr - pool->time_of_last_age_check;
                uint64_t *addr = sq->mr.buf;
                int j;

                addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
                pool->time_of_last_age_check = curr;
                for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
                        struct mlx5_aso_age_action *act = &pool->actions[j];
                        struct mlx5_age_param *ap = &act->age_params;
                        uint8_t byte;
                        uint8_t offset;
                        uint8_t *u8addr;
                        uint8_t hit;

                        if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
                                            AGE_CANDIDATE)
                                continue;
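                        /*
                         * Hit bits are packed eight per byte, starting from
                         * the last byte of the 64-byte ASO data block.
                         */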
                        byte = 63 - (j / 8);
                        offset = j % 8;
                        u8addr = (uint8_t *)addr;
                        hit = (u8addr[byte] >> offset) & 0x1;
                        if (hit) {
                                __atomic_store_n(&ap->sec_since_last_hit, 0,
                                                 __ATOMIC_RELAXED);
                        } else {
                                struct mlx5_priv *priv;

                                __atomic_fetch_add(&ap->sec_since_last_hit,
                                                   diff, __ATOMIC_RELAXED);
                                /* If timeout passed add to aged-out list. */
                                if (ap->sec_since_last_hit <= ap->timeout)
                                        continue;
                                priv =
                                rte_eth_devices[ap->port_id].data->dev_private;
                                age_info = GET_PORT_AGE_INFO(priv);
                                rte_spinlock_lock(&age_info->aged_sl);
                                if (__atomic_compare_exchange_n(&ap->state,
                                                                &expected,
                                                                AGE_TMOUT,
                                                                false,
                                                               __ATOMIC_RELAXED,
                                                            __ATOMIC_RELAXED)) {
                                        LIST_INSERT_HEAD(&age_info->aged_aso,
                                                         act, next);
                                        MLX5_AGE_SET(age_info,
                                                     MLX5_AGE_EVENT_NEW);
                                }
                                rte_spinlock_unlock(&age_info->aged_sl);
                        }
                }
        }
        mlx5_age_event_prepare(sh);
}

/**
 * Handle completions from WQEs sent to ASO SQ.
 *
 * @param[in] sh
 *   Shared device context.
 *
 * @return
 *   Number of CQEs handled.
 */
static uint16_t
mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
{
        struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
        struct mlx5_aso_sq *sq = &mng->aso_sq;
        struct mlx5_aso_cq *cq = &sq->cq;
        volatile struct mlx5_cqe *restrict cqe;
        const unsigned int cq_size = 1 << cq->log_desc_n;
        const unsigned int mask = cq_size - 1;
        uint32_t idx;
        uint32_t next_idx = cq->cq_ci & mask;
        const uint16_t max = (uint16_t)(sq->head - sq->tail);
        uint16_t i = 0;
        int ret;

        if (unlikely(!max))
                return 0;
        do {
                idx = next_idx;
                next_idx = (cq->cq_ci + 1) & mask;
                rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
                cqe = &cq->cq_obj.cqes[idx];
                ret = check_cqe(cqe, cq_size, cq->cq_ci);
                /*
                 * Be sure owner read is done before any other cookie field or
                 * opaque field.
                 */
                rte_io_rmb();
                if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
                        if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
                                break;
                        mlx5_aso_cqe_err_handle(sq);
                } else {
                        i += sq->elts[(sq->tail + i) & mask].burst_size;
                }
                cq->cq_ci++;
        } while (1);
        if (likely(i)) {
                mlx5_aso_age_action_update(sh, i);
                sq->tail += i;
                rte_io_wmb();
                cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
        }
        return i;
}

/**
 * Periodically read CQEs and send WQEs to ASO SQ.
 *
 * @param[in] arg
 *   Shared device context containing the ASO SQ.
 */
static void
mlx5_flow_aso_alarm(void *arg)
{
        struct mlx5_dev_ctx_shared *sh = arg;
        struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
        uint32_t us = 100u;
        uint16_t n;

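        /*
         * Each alarm tick handles pending completions and posts the next
         * burst of queries; after a full pass over all pools the next tick
         * is delayed to one second.
         */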
        rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
        n = sh->aso_age_mng->next;
        rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
        mlx5_aso_completion_handle(sh);
        if (sq->next == n) {
                /* End of loop: wait 1 second. */
                us = US_PER_S;
                sq->next = 0;
        }
        mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
        if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
                DRV_LOG(ERR, "Cannot reinitialize aso alarm.");
}

/**
 * API to start ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh)
{
        if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
                DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
                return -rte_errno;
        }
        return 0;
}

/**
 * API to stop ASO access using ASO SQ.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh)
{
        int retries = 1024;

        if (!sh->aso_age_mng->aso_sq.sq)
                return -EINVAL;
        rte_errno = 0;
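        /* Retry cancelling while the alarm callback is still executing. */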
        while (--retries) {
                rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
                if (rte_errno != EINPROGRESS)
                        break;
                rte_pause();
        }
        return -rte_errno;
}